// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <asm/global_data.h>

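/* Obtain the clk_ops table provided by a clock device's driver */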
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

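/* Return the struct clk stored as a clock device's uclass-private data */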
struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	return (struct clk *)dev_get_uclass_priv(dev);
}

#if CONFIG_IS_ENABLED(OF_CONTROL)
# if CONFIG_IS_ENABLED(OF_PLATDATA)
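/*
 * With OF_PLATDATA there is no device tree at runtime: clocks are looked up
 * through the phandle_1_arg records that dtoc generates at build time.
 */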
int clk_get_by_driver_info(struct udevice *dev, struct phandle_1_arg *cells,
			   struct clk *clk)
{
	int ret;

	ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
	if (ret)
		return ret;
	clk->id = cells->arg[0];

	return 0;
}
# else
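/*
 * Default translation of a clock phandle specifier: a single optional cell
 * is taken as the clock ID. Providers that need more than one cell must
 * supply their own of_xlate() operation.
 */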
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

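/*
 * Common tail of the clk_get_by_index*() lookups: resolve the parsed
 * phandle args to a provider device, translate them into clk->id/clk->data
 * and request the clock from its driver.
 */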
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);
		return log_msg_ret("get", ret);
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return log_msg_ret("xlate", ret);
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);

	return log_msg_ret("prop", ret);
}

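/*
 * Look up one entry of a clock phandle list (e.g. "clocks" or
 * "assigned-clock-parents") by index and fill in @clk.
 */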
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return log_ret(ret);
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

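/*
 * With CCF, operate on the clock's registered struct clk (looked up by ID)
 * rather than the caller's local copy, so that enable counting and
 * reparenting see the canonical instance; without CCF, use @clk as-is.
 */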
static struct clk *clk_set_default_get_by_id(struct clk *clk)
{
	struct clk *c = clk;

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		int ret = clk_get_by_id(clk->id, &c);

		if (ret) {
			debug("%s(): could not get parent clock pointer, id %lu\n",
			      __func__, clk->id);
			return ERR_PTR(ret);
		}
	}

	return c;
}

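/*
 * Apply the "assigned-clock-parents" property of @dev: each listed parent
 * is set on the clock at the same index in "assigned-clocks".
 */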
static int clk_set_default_parents(struct udevice *dev,
				   enum clk_defaults_stage stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * That cannot be done here; it must wait until after the
		 * device has been probed.
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* Do not set up the parent clocks twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

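/*
 * Apply the "assigned-clock-rates" property of @dev: each non-zero rate is
 * set on the clock at the same index in "assigned-clocks".
 */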
static int clk_set_default_rates(struct udevice *dev,
				 enum clk_defaults_stage stage)
{
	struct clk clk, *c;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			dev_dbg(dev,
				"could not get assigned clock %d (err = %d)\n",
				index, ret);
			continue;
		}

		/*
		 * This is a clk provider device trying to program itself.
		 * That cannot be done here; it must wait until after the
		 * device has been probed.
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* Do not set up the rates twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c)) {
			ret = PTR_ERR(c);
			goto fail;
		}

		ret = clk_set_rate(c, rates[index]);

		if (ret < 0) {
			dev_warn(dev,
				 "failed to set rate on clock index %d (%ld) (error = %d)\n",
				 index, clk.id, ret);
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

int clk_set_defaults(struct udevice *dev, enum clk_defaults_stage stage)
{
	int ret;

	if (!dev_has_ofnode(dev))
		return 0;

	/*
	 * To avoid setting defaults twice, don't set them before relocation.
	 * However, still set them for SPL. And still set them if explicitly
	 * asked.
	 */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		if (stage != CLK_DEFAULTS_POST_FORCE)
			return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}

int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}
# endif /* OF_PLATDATA */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
		debug("ofnode_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}

int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
	int ret;

	ret = clk_get_by_name_nodev(node, name, clk);
	if (ret == -ENODATA)
		return 0;

	return ret;
}

int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

#endif /* OF_CONTROL */

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->rfree)
		return 0;

	return ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	ret = ops->get_rate(clk);
	if (ret)
		return log_ret(ret);

	return 0;
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	if (!pdev)
		return ERR_PTR(-ENODEV);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/*
	 * Read the rate if it has not been cached yet, or if the
	 * CLK_GET_RATE_NOCACHE flag forces a fresh read.
	 */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_round_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;

	ops = clk_dev_ops(clk->dev);
	if (!ops->round_rate)
		return -ENOSYS;

	return ops->round_rate(clk, rate);
}

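/*
 * Invalidate the cached rate of @clk and, recursively, of the clocks bound
 * to all its child devices, so the next rate query re-reads the driver.
 */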
static void clk_clean_rate_cache(struct clk *clk)
{
	struct udevice *child_dev;
	struct clk *clkp;

	if (!clk)
		return;

	clk->rate = 0;

	list_for_each_entry(child_dev, &clk->dev->child_head, sibling_node) {
		clkp = dev_get_clk_ptr(child_dev);
		clk_clean_rate_cache(clkp);
	}
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	/* Clean up cached rates for us and all child clocks */
	clk_clean_rate_cache(clk);

	return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	ret = ops->set_parent(clk, parent);
	if (ret)
		return ret;

	if (CONFIG_IS_ENABLED(CLK_CCF))
		ret = device_reparent(clk->dev, parent->dev);

	return ret;
}

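/*
 * With CCF, enable counts are tracked per clock and the parent clock is
 * enabled before the clock itself; without CCF, the call is forwarded
 * directly to the driver's enable() operation.
 */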
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Treat id 0 as an invalid clk, e.g. a dummy clock */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

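/* devres callbacks: release frees the clock; match identifies it by pointer */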
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, clk);
	return clk;
}

struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENODATA)
		return NULL;

	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * Call clk_set_defaults() again after a clock provider is probed.
	 * This takes care of cases where the DT is used to set up default
	 * parents and rates using assigned-clocks.
	 */
	clk_set_defaults(dev, CLK_DEFAULTS_POST);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};