blob: 2cc798e93684d56916ca26e3bf3bfb30fbc71950 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Simon Glassf26c8a82015-06-23 15:39:15 -06002/*
3 * Copyright (C) 2015 Google, Inc
4 * Written by Simon Glass <sjg@chromium.org>
Stephen Warren135aa952016-06-17 09:44:00 -06005 * Copyright (c) 2016, NVIDIA CORPORATION.
Philipp Tomsichf4fcba52018-01-08 13:59:18 +01006 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
Simon Glassf26c8a82015-06-23 15:39:15 -06007 */
8
Patrick Delaunayb953ec22021-04-27 11:02:19 +02009#define LOG_CATEGORY UCLASS_CLK
10
Simon Glassf26c8a82015-06-23 15:39:15 -060011#include <common.h>
12#include <clk.h>
Stephen Warren135aa952016-06-17 09:44:00 -060013#include <clk-uclass.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060014#include <dm.h>
Simon Glass7423daa2016-07-04 11:58:03 -060015#include <dt-structs.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060016#include <errno.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060017#include <log.h>
Simon Glass336d4612020-02-03 07:36:16 -070018#include <malloc.h>
Patrick Delaunay572c4462021-11-19 15:12:06 +010019#include <asm/global_data.h>
Sean Anderson8c12cb32021-04-08 22:13:03 -040020#include <dm/device_compat.h>
Claudiu Beznea4d139f32020-09-07 17:46:34 +030021#include <dm/device-internal.h>
Simon Glass61b29b82020-02-03 07:36:15 -070022#include <dm/devres.h>
23#include <dm/read.h>
Simon Glasseb41d8a2020-05-10 11:40:08 -060024#include <linux/bug.h>
Lukasz Majewski0c660c22019-06-24 15:50:42 +020025#include <linux/clk-provider.h>
Simon Glass61b29b82020-02-03 07:36:15 -070026#include <linux/err.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060027
Mario Six268453b2018-01-15 11:06:51 +010028static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
Simon Glassf26c8a82015-06-23 15:39:15 -060029{
Mario Six268453b2018-01-15 11:06:51 +010030 return (const struct clk_ops *)dev->driver->ops;
Simon Glassf26c8a82015-06-23 15:39:15 -060031}
32
/* Fetch the struct clk stored as a clock device's uclass-private data. */
struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	void *priv = dev_get_uclass_priv(dev);

	return (struct clk *)priv;
}
37
Simon Glass414cc152021-08-07 07:24:03 -060038#if CONFIG_IS_ENABLED(OF_PLATDATA)
/*
 * clk_get_by_phandle() - Look up a clock via OF_PLATDATA phandle info
 *
 * Resolves the device referenced by @cells->idx and fills in @clk with
 * that device and the single phandle argument as the clock ID.
 *
 * Note: @dev is unused here; it is kept for API symmetry with the
 * OF_REAL lookup functions.
 *
 * Returns 0 on success or a negative error code from device lookup.
 */
int clk_get_by_phandle(struct udevice *dev, const struct phandle_1_arg *cells,
		       struct clk *clk)
{
	int ret;

	/* Map the compile-time phandle index to a bound udevice */
	ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
	if (ret)
		return ret;
	/* OF_PLATDATA clocks carry exactly one cell: the clock ID */
	clk->id = cells->arg[0];

	return 0;
}
Simon Glass414cc152021-08-07 07:24:03 -060051#endif
52
53#if CONFIG_IS_ENABLED(OF_REAL)
Stephen Warren135aa952016-06-17 09:44:00 -060054static int clk_of_xlate_default(struct clk *clk,
Simon Glassa4e0ef52017-05-18 20:09:40 -060055 struct ofnode_phandle_args *args)
Stephen Warren135aa952016-06-17 09:44:00 -060056{
57 debug("%s(clk=%p)\n", __func__, clk);
58
59 if (args->args_count > 1) {
Sean Anderson46ad7ce2021-12-01 14:26:53 -050060 debug("Invalid args_count: %d\n", args->args_count);
Stephen Warren135aa952016-06-17 09:44:00 -060061 return -EINVAL;
62 }
63
64 if (args->args_count)
65 clk->id = args->args[0];
66 else
67 clk->id = 0;
68
Sekhar Norie497fab2019-07-11 14:30:24 +053069 clk->data = 0;
70
Stephen Warren135aa952016-06-17 09:44:00 -060071 return 0;
72}
73
Jagan Teki75f98312019-02-28 00:26:52 +053074static int clk_get_by_index_tail(int ret, ofnode node,
75 struct ofnode_phandle_args *args,
76 const char *list_name, int index,
77 struct clk *clk)
78{
79 struct udevice *dev_clk;
80 const struct clk_ops *ops;
81
82 assert(clk);
83 clk->dev = NULL;
84 if (ret)
85 goto err;
86
87 ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
88 if (ret) {
89 debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
90 __func__, ret);
Simon Glass5c5992c2021-01-21 13:57:11 -070091 return log_msg_ret("get", ret);
Jagan Teki75f98312019-02-28 00:26:52 +053092 }
93
94 clk->dev = dev_clk;
95
96 ops = clk_dev_ops(dev_clk);
97
98 if (ops->of_xlate)
99 ret = ops->of_xlate(clk, args);
100 else
101 ret = clk_of_xlate_default(clk, args);
102 if (ret) {
103 debug("of_xlate() failed: %d\n", ret);
Simon Glass5c5992c2021-01-21 13:57:11 -0700104 return log_msg_ret("xlate", ret);
Jagan Teki75f98312019-02-28 00:26:52 +0530105 }
106
107 return clk_request(dev_clk, clk);
108err:
109 debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
110 __func__, ofnode_get_name(node), list_name, index, ret);
Simon Glass5c5992c2021-01-21 13:57:11 -0700111
112 return log_msg_ret("prop", ret);
Jagan Teki75f98312019-02-28 00:26:52 +0530113}
114
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100115static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
116 int index, struct clk *clk)
Stephen Warren135aa952016-06-17 09:44:00 -0600117{
118 int ret;
Simon Glassaa9bb092017-05-30 21:47:29 -0600119 struct ofnode_phandle_args args;
Stephen Warren135aa952016-06-17 09:44:00 -0600120
121 debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);
122
123 assert(clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200124 clk->dev = NULL;
125
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100126 ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
Mario Six268453b2018-01-15 11:06:51 +0100127 index, &args);
Simon Glasse70cc432016-01-20 19:43:02 -0700128 if (ret) {
129 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
130 __func__, ret);
Simon Glass5c5992c2021-01-21 13:57:11 -0700131 return log_ret(ret);
Simon Glasse70cc432016-01-20 19:43:02 -0700132 }
133
Wenyou Yang3f56b132016-09-27 11:00:28 +0800134
Jagan Tekidcb63fc2019-02-28 00:26:53 +0530135 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
Sean Anderson675d7902020-06-24 06:41:08 -0400136 index, clk);
Stephen Warren135aa952016-06-17 09:44:00 -0600137}
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100138
139int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
140{
Jagan Teki75f98312019-02-28 00:26:52 +0530141 struct ofnode_phandle_args args;
142 int ret;
143
144 ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
145 index, &args);
146
147 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
Sean Anderson675d7902020-06-24 06:41:08 -0400148 index, clk);
Jagan Teki75f98312019-02-28 00:26:52 +0530149}
150
151int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
152{
153 struct ofnode_phandle_args args;
154 int ret;
155
156 ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
Sean Anderson675d7902020-06-24 06:41:08 -0400157 index, &args);
Jagan Teki75f98312019-02-28 00:26:52 +0530158
159 return clk_get_by_index_tail(ret, node, &args, "clocks",
Sean Anderson675d7902020-06-24 06:41:08 -0400160 index, clk);
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100161}
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100162
Neil Armstronga855be82018-04-03 11:44:18 +0200163int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
164{
165 int i, ret, err, count;
Patrick Delaunayc2625222021-04-27 10:57:54 +0200166
Neil Armstronga855be82018-04-03 11:44:18 +0200167 bulk->count = 0;
168
Patrick Delaunay89f68302020-09-25 09:41:14 +0200169 count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
Neil Armstrong721881c2018-04-17 11:30:31 +0200170 if (count < 1)
171 return count;
Neil Armstronga855be82018-04-03 11:44:18 +0200172
173 bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
174 if (!bulk->clks)
175 return -ENOMEM;
176
177 for (i = 0; i < count; i++) {
178 ret = clk_get_by_index(dev, i, &bulk->clks[i]);
179 if (ret < 0)
180 goto bulk_get_err;
181
182 ++bulk->count;
183 }
184
185 return 0;
186
187bulk_get_err:
188 err = clk_release_all(bulk->clks, bulk->count);
189 if (err)
190 debug("%s: could release all clocks for %p\n",
191 __func__, dev);
192
193 return ret;
194}
195
Claudiu Bezneab3641342020-09-07 17:46:36 +0300196static struct clk *clk_set_default_get_by_id(struct clk *clk)
197{
198 struct clk *c = clk;
199
200 if (CONFIG_IS_ENABLED(CLK_CCF)) {
201 int ret = clk_get_by_id(clk->id, &c);
202
203 if (ret) {
204 debug("%s(): could not get parent clock pointer, id %lu\n",
205 __func__, clk->id);
206 ERR_PTR(ret);
207 }
208 }
209
210 return c;
211}
212
/*
 * clk_set_default_parents() - Apply "assigned-clock-parents" from the DT
 * @dev: device whose node carries the assigned-clock properties
 * @stage: CLK_DEFAULTS_PRE (pre-probe) or a post-probe stage; used to
 *         decide whether a provider reparenting itself is handled now
 *         or deferred until after its own probe
 *
 * Walks assigned-clock-parents entry by entry, pairing each with the
 * same index in assigned-clocks, and calls clk_set_parent(). Missing
 * entries (-ENOENT), not-yet-probed providers (-EPROBE_DEFER) and
 * drivers without reparent support (-ENOSYS) are skipped, not errors.
 *
 * Returns 0 on success or a negative error code.
 */
static int clk_set_default_parents(struct udevice *dev,
				   enum clk_defaults_stage stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		/* Property absent or malformed: nothing to apply */
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/* With CCF, operate on the provider's registered clock */
		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * It cannot be done right now; it must wait until after the
		 * device is probed (post stage).
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}
295
Sean Anderson6e33eba2021-06-11 00:16:07 -0400296static int clk_set_default_rates(struct udevice *dev,
297 enum clk_defaults_stage stage)
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100298{
Claudiu Bezneab3641342020-09-07 17:46:36 +0300299 struct clk clk, *c;
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100300 int index;
301 int num_rates;
302 int size;
303 int ret = 0;
304 u32 *rates = NULL;
305
306 size = dev_read_size(dev, "assigned-clock-rates");
307 if (size < 0)
308 return 0;
309
310 num_rates = size / sizeof(u32);
311 rates = calloc(num_rates, sizeof(u32));
312 if (!rates)
313 return -ENOMEM;
314
315 ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
316 if (ret)
317 goto fail;
318
319 for (index = 0; index < num_rates; index++) {
Neil Armstrongd64caaf2018-07-26 15:19:32 +0200320 /* If 0 is passed, this is a no-op */
321 if (!rates[index])
322 continue;
323
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100324 ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
325 index, &clk);
Tero Kristo1e1fab02021-06-11 11:45:11 +0300326 /*
327 * If the clock provider is not ready yet, let it handle
328 * the re-programming later.
329 */
330 if (ret == -EPROBE_DEFER) {
331 ret = 0;
332 continue;
333 }
334
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100335 if (ret) {
Sean Anderson8c12cb32021-04-08 22:13:03 -0400336 dev_dbg(dev,
337 "could not get assigned clock %d (err = %d)\n",
338 index, ret);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100339 continue;
340 }
341
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200342 /* This is clk provider device trying to program itself
343 * It cannot be done right now but need to wait after the
344 * device is probed
345 */
Sean Anderson6e33eba2021-06-11 00:16:07 -0400346 if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200347 continue;
348
Sean Anderson6e33eba2021-06-11 00:16:07 -0400349 if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200350 /* do not setup twice the parent clocks */
351 continue;
352
Claudiu Bezneab3641342020-09-07 17:46:36 +0300353 c = clk_set_default_get_by_id(&clk);
354 if (IS_ERR(c))
355 return PTR_ERR(c);
356
357 ret = clk_set_rate(c, rates[index]);
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200358
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100359 if (ret < 0) {
Sean Anderson8c12cb32021-04-08 22:13:03 -0400360 dev_warn(dev,
361 "failed to set rate on clock index %d (%ld) (error = %d)\n",
362 index, clk.id, ret);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100363 break;
364 }
365 }
366
367fail:
368 free(rates);
369 return ret;
370}
371
Sean Anderson6e33eba2021-06-11 00:16:07 -0400372int clk_set_defaults(struct udevice *dev, enum clk_defaults_stage stage)
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100373{
374 int ret;
375
Simon Glass7d14ee42020-12-19 10:40:13 -0700376 if (!dev_has_ofnode(dev))
Peng Fan91944ef2019-07-31 07:01:49 +0000377 return 0;
378
Sean Anderson6e33eba2021-06-11 00:16:07 -0400379 /*
380 * To avoid setting defaults twice, don't set them before relocation.
381 * However, still set them for SPL. And still set them if explicitly
382 * asked.
383 */
Philipp Tomsich291da962018-11-26 20:20:19 +0100384 if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
Sean Anderson6e33eba2021-06-11 00:16:07 -0400385 if (stage != CLK_DEFAULTS_POST_FORCE)
386 return 0;
Philipp Tomsich291da962018-11-26 20:20:19 +0100387
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100388 debug("%s(%s)\n", __func__, dev_read_name(dev));
389
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200390 ret = clk_set_default_parents(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100391 if (ret)
392 return ret;
393
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200394 ret = clk_set_default_rates(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100395 if (ret < 0)
396 return ret;
397
398 return 0;
399}
Stephen Warren135aa952016-06-17 09:44:00 -0600400
401int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
402{
403 int index;
404
405 debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200406 clk->dev = NULL;
Stephen Warren135aa952016-06-17 09:44:00 -0600407
Simon Glassaa9bb092017-05-30 21:47:29 -0600408 index = dev_read_stringlist_search(dev, "clock-names", name);
Stephen Warren135aa952016-06-17 09:44:00 -0600409 if (index < 0) {
Simon Glassb02e4042016-10-02 17:59:28 -0600410 debug("fdt_stringlist_search() failed: %d\n", index);
Stephen Warren135aa952016-06-17 09:44:00 -0600411 return index;
412 }
413
414 return clk_get_by_index(dev, index, clk);
Simon Glasse70cc432016-01-20 19:43:02 -0700415}
Simon Glassf0ab8f92021-08-07 07:24:09 -0600416#endif /* OF_REAL */
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200417
Chunfeng Yund6464202020-01-09 11:35:07 +0800418int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
419{
420 int index;
421
422 debug("%s(node=%p, name=%s, clk=%p)\n", __func__,
423 ofnode_get_name(node), name, clk);
424 clk->dev = NULL;
425
426 index = ofnode_stringlist_search(node, "clock-names", name);
427 if (index < 0) {
428 debug("fdt_stringlist_search() failed: %d\n", index);
429 return index;
430 }
431
432 return clk_get_by_index_nodev(node, index, clk);
433}
434
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200435int clk_release_all(struct clk *clk, int count)
436{
437 int i, ret;
438
439 for (i = 0; i < count; i++) {
440 debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);
441
442 /* check if clock has been previously requested */
443 if (!clk[i].dev)
444 continue;
445
446 ret = clk_disable(&clk[i]);
447 if (ret && ret != -ENOSYS)
448 return ret;
449
450 ret = clk_free(&clk[i]);
451 if (ret && ret != -ENOSYS)
452 return ret;
453 }
454
455 return 0;
456}
457
Stephen Warren135aa952016-06-17 09:44:00 -0600458int clk_request(struct udevice *dev, struct clk *clk)
459{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200460 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600461
462 debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200463 if (!clk)
464 return 0;
465 ops = clk_dev_ops(dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600466
467 clk->dev = dev;
468
469 if (!ops->request)
470 return 0;
471
472 return ops->request(clk);
473}
474
475int clk_free(struct clk *clk)
476{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200477 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600478
479 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800480 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200481 return 0;
482 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600483
Sean Anderson276d4462022-01-15 17:24:58 -0500484 if (ops->rfree)
485 ops->rfree(clk);
486 return 0;
Stephen Warren135aa952016-06-17 09:44:00 -0600487}
488
489ulong clk_get_rate(struct clk *clk)
490{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200491 const struct clk_ops *ops;
Simon Glass5c5992c2021-01-21 13:57:11 -0700492 int ret;
Stephen Warren135aa952016-06-17 09:44:00 -0600493
494 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800495 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200496 return 0;
497 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600498
499 if (!ops->get_rate)
500 return -ENOSYS;
501
Simon Glass5c5992c2021-01-21 13:57:11 -0700502 ret = ops->get_rate(clk);
503 if (ret)
504 return log_ret(ret);
505
506 return 0;
Stephen Warren135aa952016-06-17 09:44:00 -0600507}
508
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200509struct clk *clk_get_parent(struct clk *clk)
510{
511 struct udevice *pdev;
512 struct clk *pclk;
513
514 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800515 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200516 return NULL;
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200517
518 pdev = dev_get_parent(clk->dev);
Tero Kristo920ea5a2021-06-11 11:45:08 +0300519 if (!pdev)
520 return ERR_PTR(-ENODEV);
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200521 pclk = dev_get_clk_ptr(pdev);
522 if (!pclk)
523 return ERR_PTR(-ENODEV);
524
525 return pclk;
526}
527
Lukasz Majewski4aa78302019-06-24 15:50:43 +0200528long long clk_get_parent_rate(struct clk *clk)
529{
530 const struct clk_ops *ops;
531 struct clk *pclk;
532
533 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800534 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200535 return 0;
Lukasz Majewski4aa78302019-06-24 15:50:43 +0200536
537 pclk = clk_get_parent(clk);
538 if (IS_ERR(pclk))
539 return -ENODEV;
540
541 ops = clk_dev_ops(pclk->dev);
542 if (!ops->get_rate)
543 return -ENOSYS;
544
Lukasz Majewski1a961c92019-06-24 15:50:46 +0200545 /* Read the 'rate' if not already set or if proper flag set*/
546 if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
Lukasz Majewski4aa78302019-06-24 15:50:43 +0200547 pclk->rate = clk_get_rate(pclk);
548
549 return pclk->rate;
550}
551
Dario Binacchi2983ad52020-12-30 00:06:31 +0100552ulong clk_round_rate(struct clk *clk, ulong rate)
553{
554 const struct clk_ops *ops;
555
556 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
557 if (!clk_valid(clk))
558 return 0;
559
560 ops = clk_dev_ops(clk->dev);
561 if (!ops->round_rate)
562 return -ENOSYS;
563
564 return ops->round_rate(clk, rate);
565}
566
Tero Kristo6b7fd312021-06-11 11:45:12 +0300567static void clk_clean_rate_cache(struct clk *clk)
568{
569 struct udevice *child_dev;
570 struct clk *clkp;
571
572 if (!clk)
573 return;
574
575 clk->rate = 0;
576
577 list_for_each_entry(child_dev, &clk->dev->child_head, sibling_node) {
578 clkp = dev_get_clk_ptr(child_dev);
579 clk_clean_rate_cache(clkp);
580 }
581}
582
Stephen Warren135aa952016-06-17 09:44:00 -0600583ulong clk_set_rate(struct clk *clk, ulong rate)
584{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200585 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600586
587 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800588 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200589 return 0;
590 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600591
592 if (!ops->set_rate)
593 return -ENOSYS;
594
Tero Kristo6b7fd312021-06-11 11:45:12 +0300595 /* Clean up cached rates for us and all child clocks */
596 clk_clean_rate_cache(clk);
597
Stephen Warren135aa952016-06-17 09:44:00 -0600598 return ops->set_rate(clk, rate);
599}
600
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100601int clk_set_parent(struct clk *clk, struct clk *parent)
602{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200603 const struct clk_ops *ops;
Claudiu Beznea4d139f32020-09-07 17:46:34 +0300604 int ret;
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100605
606 debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800607 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200608 return 0;
609 ops = clk_dev_ops(clk->dev);
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100610
611 if (!ops->set_parent)
612 return -ENOSYS;
613
Claudiu Beznea4d139f32020-09-07 17:46:34 +0300614 ret = ops->set_parent(clk, parent);
615 if (ret)
616 return ret;
617
618 if (CONFIG_IS_ENABLED(CLK_CCF))
619 ret = device_reparent(clk->dev, parent->dev);
620
621 return ret;
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100622}
623
/*
 * clk_enable() - Enable a clock
 * @clk: clock to enable (an invalid clk is a successful no-op)
 *
 * Without CLK_CCF this simply calls the driver's enable op (-ENOSYS if
 * absent). With CLK_CCF it reference-counts enables on the registered
 * peer clock (looked up by ID) and recursively enables the parent clock
 * device first, so shared clocks are only gated on once.
 *
 * Returns 0 on success or a negative error code.
 */
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Already on: just bump the reference count */
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			/* First enable: make sure the parent clock is on */
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		/* Count the enable only after the hardware call succeeded */
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}
670
Neil Armstronga855be82018-04-03 11:44:18 +0200671int clk_enable_bulk(struct clk_bulk *bulk)
672{
673 int i, ret;
674
675 for (i = 0; i < bulk->count; i++) {
676 ret = clk_enable(&bulk->clks[i]);
677 if (ret < 0 && ret != -ENOSYS)
678 return ret;
679 }
680
681 return 0;
682}
683
/*
 * clk_disable() - Disable a clock
 * @clk: clock to disable (an invalid clk is a successful no-op)
 *
 * Without CLK_CCF this simply calls the driver's disable op (-ENOSYS if
 * absent). With CLK_CCF it honours CLK_IS_CRITICAL, decrements the
 * shared enable count, only gates the hardware when the count reaches
 * zero, and then recursively releases the parent clock device.
 *
 * Returns 0 on success or a negative error code.
 */
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Critical clocks must never be gated */
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			/* Other users remain: just drop our reference */
			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		/* Last user gone: release the parent clock as well */
		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}
Simon Glasse70cc432016-01-20 19:43:02 -0700734
Neil Armstronga855be82018-04-03 11:44:18 +0200735int clk_disable_bulk(struct clk_bulk *bulk)
736{
737 int i, ret;
738
739 for (i = 0; i < bulk->count; i++) {
740 ret = clk_disable(&bulk->clks[i]);
741 if (ret < 0 && ret != -ENOSYS)
742 return ret;
743 }
744
745 return 0;
746}
747
Lukasz Majewski2796af72019-06-24 15:50:44 +0200748int clk_get_by_id(ulong id, struct clk **clkp)
749{
750 struct udevice *dev;
751 struct uclass *uc;
752 int ret;
753
754 ret = uclass_get(UCLASS_CLK, &uc);
755 if (ret)
756 return ret;
757
758 uclass_foreach_dev(dev, uc) {
759 struct clk *clk = dev_get_clk_ptr(dev);
760
761 if (clk && clk->id == id) {
762 *clkp = clk;
763 return 0;
764 }
765 }
766
767 return -ENOENT;
768}
769
Sekhar Noriacbb7cd2019-08-01 19:12:55 +0530770bool clk_is_match(const struct clk *p, const struct clk *q)
771{
772 /* trivial case: identical struct clk's or both NULL */
773 if (p == q)
774 return true;
775
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200776 /* trivial case #2: on the clk pointer is NULL */
777 if (!p || !q)
778 return false;
779
Sekhar Noriacbb7cd2019-08-01 19:12:55 +0530780 /* same device, id and data */
781 if (p->dev == q->dev && p->id == q->id && p->data == q->data)
782 return true;
783
784 return false;
785}
786
/* devres destructor: free the clock held in the resource payload. */
static void devm_clk_release(struct udevice *dev, void *res)
{
	struct clk *clk = res;

	clk_free(clk);
}
791
/* devres matcher: a resource matches when it is the given pointer. */
static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data ? 1 : 0;
}
796
797struct clk *devm_clk_get(struct udevice *dev, const char *id)
798{
799 int rc;
800 struct clk *clk;
801
802 clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
803 if (unlikely(!clk))
804 return ERR_PTR(-ENOMEM);
805
806 rc = clk_get_by_name(dev, id, clk);
807 if (rc)
808 return ERR_PTR(rc);
809
810 devres_add(dev, clk);
811 return clk;
812}
813
Jean-Jacques Hiblot52720c52019-10-22 14:00:04 +0200814void devm_clk_put(struct udevice *dev, struct clk *clk)
815{
816 int rc;
817
818 if (!clk)
819 return;
820
821 rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
822 WARN_ON(rc);
823}
824
/*
 * clk_uclass_post_probe() - Uclass hook run after a clock provider probes
 *
 * Re-applies clk_set_defaults() once the provider is probed, covering
 * assigned-clocks entries in the DT that target the provider itself
 * (which the pre-probe pass must skip).
 *
 * NOTE(review): the clk_set_defaults() return value is ignored here —
 * presumably so a defaults failure does not fail the probe; confirm
 * this is intentional before propagating it.
 */
int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * When a clock provider is probed, call clk_set_defaults() again
	 * after the device is probed. This takes care of cases where the
	 * DT is used to set up default parents and rates using
	 * assigned-clocks.
	 */
	clk_set_defaults(dev, CLK_DEFAULTS_POST);

	return 0;
}
837
/* Driver-model uclass for clock providers; post_probe applies DT defaults. */
UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};