blob: d5c4e3cbe5105633ff334665f8bb1b80a04d0b27 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Simon Glassf26c8a82015-06-23 15:39:15 -06002/*
3 * Copyright (C) 2015 Google, Inc
4 * Written by Simon Glass <sjg@chromium.org>
Stephen Warren135aa952016-06-17 09:44:00 -06005 * Copyright (c) 2016, NVIDIA CORPORATION.
Philipp Tomsichf4fcba52018-01-08 13:59:18 +01006 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
Simon Glassf26c8a82015-06-23 15:39:15 -06007 */
8
9#include <common.h>
10#include <clk.h>
Stephen Warren135aa952016-06-17 09:44:00 -060011#include <clk-uclass.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060012#include <dm.h>
Simon Glass7423daa2016-07-04 11:58:03 -060013#include <dt-structs.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060014#include <errno.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060015#include <log.h>
Simon Glass336d4612020-02-03 07:36:16 -070016#include <malloc.h>
Claudiu Beznea4d139f32020-09-07 17:46:34 +030017#include <dm/device-internal.h>
Simon Glass61b29b82020-02-03 07:36:15 -070018#include <dm/devres.h>
19#include <dm/read.h>
Simon Glasseb41d8a2020-05-10 11:40:08 -060020#include <linux/bug.h>
Lukasz Majewski0c660c22019-06-24 15:50:42 +020021#include <linux/clk-provider.h>
Simon Glass61b29b82020-02-03 07:36:15 -070022#include <linux/err.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060023
/* Return the clk_ops provided by the driver bound to @dev (hooks may be NULL). */
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}
28
/* Return the struct clk kept as uclass-private data of clock device @dev. */
struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	return (struct clk *)dev_get_uclass_priv(dev);
}
33
Simon Glasse70cc432016-01-20 19:43:02 -070034#if CONFIG_IS_ENABLED(OF_CONTROL)
Simon Glass7423daa2016-07-04 11:58:03 -060035# if CONFIG_IS_ENABLED(OF_PLATDATA)
Walter Lozano51f12632020-06-25 01:10:13 -030036int clk_get_by_driver_info(struct udevice *dev, struct phandle_1_arg *cells,
37 struct clk *clk)
Simon Glass7423daa2016-07-04 11:58:03 -060038{
39 int ret;
40
Simon Glass8a38abf2020-10-03 11:31:40 -060041 ret = device_get_by_driver_info_idx(cells->idx, &clk->dev);
Simon Glass7423daa2016-07-04 11:58:03 -060042 if (ret)
43 return ret;
Walter Lozano51f12632020-06-25 01:10:13 -030044 clk->id = cells->arg[0];
Simon Glass7423daa2016-07-04 11:58:03 -060045
46 return 0;
47}
48# else
Stephen Warren135aa952016-06-17 09:44:00 -060049static int clk_of_xlate_default(struct clk *clk,
Simon Glassa4e0ef52017-05-18 20:09:40 -060050 struct ofnode_phandle_args *args)
Stephen Warren135aa952016-06-17 09:44:00 -060051{
52 debug("%s(clk=%p)\n", __func__, clk);
53
54 if (args->args_count > 1) {
55 debug("Invaild args_count: %d\n", args->args_count);
56 return -EINVAL;
57 }
58
59 if (args->args_count)
60 clk->id = args->args[0];
61 else
62 clk->id = 0;
63
Sekhar Norie497fab2019-07-11 14:30:24 +053064 clk->data = 0;
65
Stephen Warren135aa952016-06-17 09:44:00 -060066 return 0;
67}
68
Jagan Teki75f98312019-02-28 00:26:52 +053069static int clk_get_by_index_tail(int ret, ofnode node,
70 struct ofnode_phandle_args *args,
71 const char *list_name, int index,
72 struct clk *clk)
73{
74 struct udevice *dev_clk;
75 const struct clk_ops *ops;
76
77 assert(clk);
78 clk->dev = NULL;
79 if (ret)
80 goto err;
81
82 ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
83 if (ret) {
84 debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
85 __func__, ret);
Simon Glass5c5992c2021-01-21 13:57:11 -070086 return log_msg_ret("get", ret);
Jagan Teki75f98312019-02-28 00:26:52 +053087 }
88
89 clk->dev = dev_clk;
90
91 ops = clk_dev_ops(dev_clk);
92
93 if (ops->of_xlate)
94 ret = ops->of_xlate(clk, args);
95 else
96 ret = clk_of_xlate_default(clk, args);
97 if (ret) {
98 debug("of_xlate() failed: %d\n", ret);
Simon Glass5c5992c2021-01-21 13:57:11 -070099 return log_msg_ret("xlate", ret);
Jagan Teki75f98312019-02-28 00:26:52 +0530100 }
101
102 return clk_request(dev_clk, clk);
103err:
104 debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
105 __func__, ofnode_get_name(node), list_name, index, ret);
Simon Glass5c5992c2021-01-21 13:57:11 -0700106
107 return log_msg_ret("prop", ret);
Jagan Teki75f98312019-02-28 00:26:52 +0530108}
109
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100110static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
111 int index, struct clk *clk)
Stephen Warren135aa952016-06-17 09:44:00 -0600112{
113 int ret;
Simon Glassaa9bb092017-05-30 21:47:29 -0600114 struct ofnode_phandle_args args;
Stephen Warren135aa952016-06-17 09:44:00 -0600115
116 debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);
117
118 assert(clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200119 clk->dev = NULL;
120
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100121 ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
Mario Six268453b2018-01-15 11:06:51 +0100122 index, &args);
Simon Glasse70cc432016-01-20 19:43:02 -0700123 if (ret) {
124 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
125 __func__, ret);
Simon Glass5c5992c2021-01-21 13:57:11 -0700126 return log_ret(ret);
Simon Glasse70cc432016-01-20 19:43:02 -0700127 }
128
Wenyou Yang3f56b132016-09-27 11:00:28 +0800129
Jagan Tekidcb63fc2019-02-28 00:26:53 +0530130 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
Sean Anderson675d7902020-06-24 06:41:08 -0400131 index, clk);
Stephen Warren135aa952016-06-17 09:44:00 -0600132}
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100133
134int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
135{
Jagan Teki75f98312019-02-28 00:26:52 +0530136 struct ofnode_phandle_args args;
137 int ret;
138
139 ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
140 index, &args);
141
142 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
Sean Anderson675d7902020-06-24 06:41:08 -0400143 index, clk);
Jagan Teki75f98312019-02-28 00:26:52 +0530144}
145
146int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
147{
148 struct ofnode_phandle_args args;
149 int ret;
150
151 ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
Sean Anderson675d7902020-06-24 06:41:08 -0400152 index, &args);
Jagan Teki75f98312019-02-28 00:26:52 +0530153
154 return clk_get_by_index_tail(ret, node, &args, "clocks",
Sean Anderson675d7902020-06-24 06:41:08 -0400155 index, clk);
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100156}
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100157
Neil Armstronga855be82018-04-03 11:44:18 +0200158int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
159{
160 int i, ret, err, count;
161
162 bulk->count = 0;
163
Patrick Delaunay89f68302020-09-25 09:41:14 +0200164 count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
Neil Armstrong721881c2018-04-17 11:30:31 +0200165 if (count < 1)
166 return count;
Neil Armstronga855be82018-04-03 11:44:18 +0200167
168 bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
169 if (!bulk->clks)
170 return -ENOMEM;
171
172 for (i = 0; i < count; i++) {
173 ret = clk_get_by_index(dev, i, &bulk->clks[i]);
174 if (ret < 0)
175 goto bulk_get_err;
176
177 ++bulk->count;
178 }
179
180 return 0;
181
182bulk_get_err:
183 err = clk_release_all(bulk->clks, bulk->count);
184 if (err)
185 debug("%s: could release all clocks for %p\n",
186 __func__, dev);
187
188 return ret;
189}
190
Claudiu Bezneab3641342020-09-07 17:46:36 +0300191static struct clk *clk_set_default_get_by_id(struct clk *clk)
192{
193 struct clk *c = clk;
194
195 if (CONFIG_IS_ENABLED(CLK_CCF)) {
196 int ret = clk_get_by_id(clk->id, &c);
197
198 if (ret) {
199 debug("%s(): could not get parent clock pointer, id %lu\n",
200 __func__, clk->id);
201 ERR_PTR(ret);
202 }
203 }
204
205 return c;
206}
207
/*
 * Apply the DT "assigned-clock-parents" property of @dev: reparent each
 * entry of "assigned-clocks" onto the corresponding parent clock.
 *
 * @stage: 0 = pre-probe pass (skip clocks provided by @dev itself, since
 *         the provider cannot reparent before it is probed); >0 = post-probe
 *         pass (handle only clocks provided by @dev, so nothing is set twice).
 *
 * Returns 0 on success (a missing property is not an error) or a negative
 * error code.
 */
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		/* Property absent: nothing to do, not an error */
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/* This is clk provider device trying to reparent itself
		 * It cannot be done right now but need to wait after the
		 * device is probed
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not setup twice the parent clocks */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}
280
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200281static int clk_set_default_rates(struct udevice *dev, int stage)
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100282{
Claudiu Bezneab3641342020-09-07 17:46:36 +0300283 struct clk clk, *c;
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100284 int index;
285 int num_rates;
286 int size;
287 int ret = 0;
288 u32 *rates = NULL;
289
290 size = dev_read_size(dev, "assigned-clock-rates");
291 if (size < 0)
292 return 0;
293
294 num_rates = size / sizeof(u32);
295 rates = calloc(num_rates, sizeof(u32));
296 if (!rates)
297 return -ENOMEM;
298
299 ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
300 if (ret)
301 goto fail;
302
303 for (index = 0; index < num_rates; index++) {
Neil Armstrongd64caaf2018-07-26 15:19:32 +0200304 /* If 0 is passed, this is a no-op */
305 if (!rates[index])
306 continue;
307
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100308 ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
309 index, &clk);
310 if (ret) {
311 debug("%s: could not get assigned clock %d for %s\n",
312 __func__, index, dev_read_name(dev));
313 continue;
314 }
315
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200316 /* This is clk provider device trying to program itself
317 * It cannot be done right now but need to wait after the
318 * device is probed
319 */
320 if (stage == 0 && clk.dev == dev)
321 continue;
322
323 if (stage > 0 && clk.dev != dev)
324 /* do not setup twice the parent clocks */
325 continue;
326
Claudiu Bezneab3641342020-09-07 17:46:36 +0300327 c = clk_set_default_get_by_id(&clk);
328 if (IS_ERR(c))
329 return PTR_ERR(c);
330
331 ret = clk_set_rate(c, rates[index]);
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200332
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100333 if (ret < 0) {
Simon Glass68316162019-01-21 14:53:19 -0700334 debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
335 __func__, index, clk.id, dev_read_name(dev));
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100336 break;
337 }
338 }
339
340fail:
341 free(rates);
342 return ret;
343}
344
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200345int clk_set_defaults(struct udevice *dev, int stage)
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100346{
347 int ret;
348
Simon Glass7d14ee42020-12-19 10:40:13 -0700349 if (!dev_has_ofnode(dev))
Peng Fan91944ef2019-07-31 07:01:49 +0000350 return 0;
351
Philipp Tomsich291da962018-11-26 20:20:19 +0100352 /* If this not in SPL and pre-reloc state, don't take any action. */
353 if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
354 return 0;
355
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100356 debug("%s(%s)\n", __func__, dev_read_name(dev));
357
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200358 ret = clk_set_default_parents(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100359 if (ret)
360 return ret;
361
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200362 ret = clk_set_default_rates(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100363 if (ret < 0)
364 return ret;
365
366 return 0;
367}
Stephen Warren135aa952016-06-17 09:44:00 -0600368
369int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
370{
371 int index;
372
373 debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200374 clk->dev = NULL;
Stephen Warren135aa952016-06-17 09:44:00 -0600375
Simon Glassaa9bb092017-05-30 21:47:29 -0600376 index = dev_read_stringlist_search(dev, "clock-names", name);
Stephen Warren135aa952016-06-17 09:44:00 -0600377 if (index < 0) {
Simon Glassb02e4042016-10-02 17:59:28 -0600378 debug("fdt_stringlist_search() failed: %d\n", index);
Stephen Warren135aa952016-06-17 09:44:00 -0600379 return index;
380 }
381
382 return clk_get_by_index(dev, index, clk);
Simon Glasse70cc432016-01-20 19:43:02 -0700383}
Giulio Benettiefbdad32019-12-12 23:53:19 +0100384# endif /* OF_PLATDATA */
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200385
Chunfeng Yund6464202020-01-09 11:35:07 +0800386int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
387{
388 int index;
389
390 debug("%s(node=%p, name=%s, clk=%p)\n", __func__,
391 ofnode_get_name(node), name, clk);
392 clk->dev = NULL;
393
394 index = ofnode_stringlist_search(node, "clock-names", name);
395 if (index < 0) {
396 debug("fdt_stringlist_search() failed: %d\n", index);
397 return index;
398 }
399
400 return clk_get_by_index_nodev(node, index, clk);
401}
402
403int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
404{
405 int ret;
406
407 ret = clk_get_by_name_nodev(node, name, clk);
408 if (ret == -ENODATA)
409 return 0;
410
411 return ret;
412}
413
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200414int clk_release_all(struct clk *clk, int count)
415{
416 int i, ret;
417
418 for (i = 0; i < count; i++) {
419 debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);
420
421 /* check if clock has been previously requested */
422 if (!clk[i].dev)
423 continue;
424
425 ret = clk_disable(&clk[i]);
426 if (ret && ret != -ENOSYS)
427 return ret;
428
429 ret = clk_free(&clk[i]);
430 if (ret && ret != -ENOSYS)
431 return ret;
432 }
433
434 return 0;
435}
436
Simon Glass7423daa2016-07-04 11:58:03 -0600437#endif /* OF_CONTROL */
Stephen Warren135aa952016-06-17 09:44:00 -0600438
439int clk_request(struct udevice *dev, struct clk *clk)
440{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200441 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600442
443 debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200444 if (!clk)
445 return 0;
446 ops = clk_dev_ops(dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600447
448 clk->dev = dev;
449
450 if (!ops->request)
451 return 0;
452
453 return ops->request(clk);
454}
455
456int clk_free(struct clk *clk)
457{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200458 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600459
460 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800461 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200462 return 0;
463 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600464
Simon Glassfb8c0d52020-02-03 07:35:54 -0700465 if (!ops->rfree)
Stephen Warren135aa952016-06-17 09:44:00 -0600466 return 0;
467
Simon Glassfb8c0d52020-02-03 07:35:54 -0700468 return ops->rfree(clk);
Stephen Warren135aa952016-06-17 09:44:00 -0600469}
470
471ulong clk_get_rate(struct clk *clk)
472{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200473 const struct clk_ops *ops;
Simon Glass5c5992c2021-01-21 13:57:11 -0700474 int ret;
Stephen Warren135aa952016-06-17 09:44:00 -0600475
476 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800477 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200478 return 0;
479 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600480
481 if (!ops->get_rate)
482 return -ENOSYS;
483
Simon Glass5c5992c2021-01-21 13:57:11 -0700484 ret = ops->get_rate(clk);
485 if (ret)
486 return log_ret(ret);
487
488 return 0;
Stephen Warren135aa952016-06-17 09:44:00 -0600489}
490
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200491struct clk *clk_get_parent(struct clk *clk)
492{
493 struct udevice *pdev;
494 struct clk *pclk;
495
496 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800497 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200498 return NULL;
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200499
500 pdev = dev_get_parent(clk->dev);
501 pclk = dev_get_clk_ptr(pdev);
502 if (!pclk)
503 return ERR_PTR(-ENODEV);
504
505 return pclk;
506}
507
Lukasz Majewski4aa78302019-06-24 15:50:43 +0200508long long clk_get_parent_rate(struct clk *clk)
509{
510 const struct clk_ops *ops;
511 struct clk *pclk;
512
513 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800514 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200515 return 0;
Lukasz Majewski4aa78302019-06-24 15:50:43 +0200516
517 pclk = clk_get_parent(clk);
518 if (IS_ERR(pclk))
519 return -ENODEV;
520
521 ops = clk_dev_ops(pclk->dev);
522 if (!ops->get_rate)
523 return -ENOSYS;
524
Lukasz Majewski1a961c92019-06-24 15:50:46 +0200525 /* Read the 'rate' if not already set or if proper flag set*/
526 if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
Lukasz Majewski4aa78302019-06-24 15:50:43 +0200527 pclk->rate = clk_get_rate(pclk);
528
529 return pclk->rate;
530}
531
Dario Binacchi2983ad52020-12-30 00:06:31 +0100532ulong clk_round_rate(struct clk *clk, ulong rate)
533{
534 const struct clk_ops *ops;
535
536 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
537 if (!clk_valid(clk))
538 return 0;
539
540 ops = clk_dev_ops(clk->dev);
541 if (!ops->round_rate)
542 return -ENOSYS;
543
544 return ops->round_rate(clk, rate);
545}
546
Stephen Warren135aa952016-06-17 09:44:00 -0600547ulong clk_set_rate(struct clk *clk, ulong rate)
548{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200549 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600550
551 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800552 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200553 return 0;
554 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600555
556 if (!ops->set_rate)
557 return -ENOSYS;
558
559 return ops->set_rate(clk, rate);
560}
561
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100562int clk_set_parent(struct clk *clk, struct clk *parent)
563{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200564 const struct clk_ops *ops;
Claudiu Beznea4d139f32020-09-07 17:46:34 +0300565 int ret;
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100566
567 debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800568 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200569 return 0;
570 ops = clk_dev_ops(clk->dev);
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100571
572 if (!ops->set_parent)
573 return -ENOSYS;
574
Claudiu Beznea4d139f32020-09-07 17:46:34 +0300575 ret = ops->set_parent(clk, parent);
576 if (ret)
577 return ret;
578
579 if (CONFIG_IS_ENABLED(CLK_CCF))
580 ret = device_reparent(clk->dev, parent->dev);
581
582 return ret;
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100583}
584
/*
 * Enable @clk. Two regimes:
 *
 * - CCF builds: maintain an enable refcount on the provider's canonical
 *   struct clk (looked up by id; id 0 is treated as a dummy and skipped).
 *   If the clock is already enabled only the count is bumped; otherwise a
 *   parent clock device is enabled first, then the driver's enable() hook
 *   runs, then the count is incremented.
 * - Non-CCF builds: call the driver's enable() hook directly, or return
 *   -ENOSYS when there is none.
 *
 * Returns 0 on success (including an invalid @clk, which is a no-op) or
 * a negative error code.
 */
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;	/* canonical clk from the provider, if found */
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Already enabled: just bump the refcount */
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			/* First enable: propagate to the parent clock device */
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		/* Count only when the canonical clk was found above */
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}
631
Neil Armstronga855be82018-04-03 11:44:18 +0200632int clk_enable_bulk(struct clk_bulk *bulk)
633{
634 int i, ret;
635
636 for (i = 0; i < bulk->count; i++) {
637 ret = clk_enable(&bulk->clks[i]);
638 if (ret < 0 && ret != -ENOSYS)
639 return ret;
640 }
641
642 return 0;
643}
644
/*
 * Disable @clk. Two regimes:
 *
 * - CCF builds: use the refcount on the provider's canonical struct clk.
 *   Critical clocks (CLK_IS_CRITICAL) are never disabled; an already
 *   disabled clock is reported and ignored; the hardware hook and the
 *   parent-clock disable run only when the count drops to zero.
 * - Non-CCF builds: call the driver's disable() hook directly, or return
 *   -ENOSYS when there is none.
 *
 * Returns 0 on success (including an invalid @clk, which is a no-op) or
 * a negative error code.
 */
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;	/* canonical clk from the provider, if found */
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Critical clocks must stay running */
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			/* Other users remain: only drop the refcount */
			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		/* Last user gone: propagate the disable to the parent clock */
		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}
Simon Glasse70cc432016-01-20 19:43:02 -0700695
Neil Armstronga855be82018-04-03 11:44:18 +0200696int clk_disable_bulk(struct clk_bulk *bulk)
697{
698 int i, ret;
699
700 for (i = 0; i < bulk->count; i++) {
701 ret = clk_disable(&bulk->clks[i]);
702 if (ret < 0 && ret != -ENOSYS)
703 return ret;
704 }
705
706 return 0;
707}
708
Lukasz Majewski2796af72019-06-24 15:50:44 +0200709int clk_get_by_id(ulong id, struct clk **clkp)
710{
711 struct udevice *dev;
712 struct uclass *uc;
713 int ret;
714
715 ret = uclass_get(UCLASS_CLK, &uc);
716 if (ret)
717 return ret;
718
719 uclass_foreach_dev(dev, uc) {
720 struct clk *clk = dev_get_clk_ptr(dev);
721
722 if (clk && clk->id == id) {
723 *clkp = clk;
724 return 0;
725 }
726 }
727
728 return -ENOENT;
729}
730
Sekhar Noriacbb7cd2019-08-01 19:12:55 +0530731bool clk_is_match(const struct clk *p, const struct clk *q)
732{
733 /* trivial case: identical struct clk's or both NULL */
734 if (p == q)
735 return true;
736
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200737 /* trivial case #2: on the clk pointer is NULL */
738 if (!p || !q)
739 return false;
740
Sekhar Noriacbb7cd2019-08-01 19:12:55 +0530741 /* same device, id and data */
742 if (p->dev == q->dev && p->id == q->id && p->data == q->data)
743 return true;
744
745 return false;
746}
747
/* devres destructor: free the managed clk when its owning device goes away */
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}
752
/* devres match callback: true when @res is the exact clk passed as @data */
static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}
757
758struct clk *devm_clk_get(struct udevice *dev, const char *id)
759{
760 int rc;
761 struct clk *clk;
762
763 clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
764 if (unlikely(!clk))
765 return ERR_PTR(-ENOMEM);
766
767 rc = clk_get_by_name(dev, id, clk);
768 if (rc)
769 return ERR_PTR(rc);
770
771 devres_add(dev, clk);
772 return clk;
773}
774
775struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
776{
777 struct clk *clk = devm_clk_get(dev, id);
778
Chunfeng Yun0f9b2b32020-01-09 11:35:05 +0800779 if (PTR_ERR(clk) == -ENODATA)
Jean-Jacques Hiblot52720c52019-10-22 14:00:04 +0200780 return NULL;
781
782 return clk;
783}
784
785void devm_clk_put(struct udevice *dev, struct clk *clk)
786{
787 int rc;
788
789 if (!clk)
790 return;
791
792 rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
793 WARN_ON(rc);
794}
795
/*
 * Uclass post-probe hook: re-run clk_set_defaults() at stage 1 once a
 * clock provider has probed, so DT defaults (assigned-clocks parents and
 * rates) that target the provider's own clocks — which could not be
 * applied before it was probed — are applied now.
 */
int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * when a clock provider is probed. Call clk_set_defaults()
	 * also after the device is probed. This takes care of cases
	 * where the DT is used to setup default parents and rates
	 * using assigned-clocks
	 */
	clk_set_defaults(dev, 1);

	return 0;
}
808
/* Clock uclass: hooks post-probe to apply DT assigned-clocks defaults */
UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};
Simon Glassf26c8a82015-06-23 15:39:15 -0600813};