// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

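/* Get the clk_ops of the driver bound to a clock provider device */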
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

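/* Return the struct clk stored as uclass-private data of a clock device */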
struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	return (struct clk *)dev_get_uclass_priv(dev);
}

#if CONFIG_IS_ENABLED(OF_CONTROL)
# if CONFIG_IS_ENABLED(OF_PLATDATA)
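/*
 * With OF_PLATDATA there is no device tree at run time, so the clock
 * reference arrives as a phandle_1_arg record generated at build time;
 * resolve it straight to the provider device and take the single cell
 * as the clock ID.
 */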
int clk_get_by_driver_info(struct udevice *dev, struct phandle_1_arg *cells,
			   struct clk *clk)
{
	int ret;

	ret = device_get_by_driver_info((struct driver_info *)cells->node,
					&clk->dev);
	if (ret)
		return ret;
	clk->id = cells->arg[0];

	return 0;
}
# else
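/*
 * Default translation of a clock specifier: a single optional cell is used
 * as the clock ID and any additional cells are rejected.
 */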
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

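/*
 * Common tail of the clk_get_by_index*() variants: locate the provider
 * device for the parsed phandle, translate the specifier into clk->id
 * (using the provider's of_xlate op if it has one) and request the clock.
 */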
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return ret;
}

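/*
 * Look up one entry of a clock-list property such as "clocks" or
 * "assigned-clock-parents" on a device and fill in the struct clk for it.
 */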
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args() failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

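/*
 * Get every clock listed in a device's "clocks" property. If any lookup
 * fails, the clocks obtained so far are released before returning.
 */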
int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

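/*
 * Process the assigned-clock-parents property of a device: reparent each
 * entry of assigned-clocks to the matching parent. A clock provider's own
 * clocks are skipped at stage 0 and handled again at stage 1, once the
 * provider has been probed.
 */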
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells");
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * It cannot be done at this stage; it needs to wait until
		 * after the device is probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		ret = clk_set_parent(&clk, &parent_clk);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

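/*
 * Process the assigned-clock-rates property of a device: set each entry of
 * assigned-clocks to the requested rate, skipping entries that are 0. The
 * stage argument has the same meaning as in clk_set_default_parents().
 */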
static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * This is a clk provider device trying to program itself.
		 * It cannot be done at this stage; it needs to wait until
		 * after the device is probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the clock rates twice */
			continue;

		ret = clk_set_rate(&clk, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

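/*
 * Process the assigned-clock-parents and assigned-clock-rates properties of
 * a device. Stage 0 is the pass made while the consuming device is being
 * probed; stage 1 is the extra pass made from clk_uclass_post_probe() so
 * that a clock provider can program clocks it supplies itself.
 */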
int clk_set_defaults(struct udevice *dev, int stage)
{
	int ret;

	if (!dev_of_valid(dev))
		return 0;

	/* If this is not SPL and we have not yet relocated, take no action */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}

int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}
# endif /* OF_PLATDATA */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
		debug("ofnode_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}

int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
	int ret;

	ret = clk_get_by_name_nodev(node, name, clk);
	if (ret == -ENODATA)
		return 0;

	return ret;
}

int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

#endif /* OF_CONTROL */

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->rfree)
		return 0;

	return ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	return ops->get_rate(clk);
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already set, or if the nocache flag is set */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	ret = ops->set_parent(clk, parent);
	if (ret)
		return ret;

	if (CONFIG_IS_ENABLED(CLK_CCF))
		ret = device_reparent(clk->dev, parent->dev);

	return ret;
}

int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

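	/*
	 * With CCF, keep a reference count on the clock and make sure its
	 * parent (when that is also a clock device) is enabled before the
	 * clock itself is enabled for the first time.
	 */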
	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

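	/*
	 * With CCF, critical clocks are never disabled, and the reference
	 * count must drop to zero before the clock and then its parent are
	 * actually disabled.
	 */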
	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

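/* devres callbacks used by devm_clk_get() to free and match clock handles */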
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, clk);
	return clk;
}

struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENODATA)
		return NULL;

	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * Call clk_set_defaults() again now that the clock provider has
	 * been probed. This takes care of cases where the DT uses
	 * assigned-clocks to set up default parents and rates on the
	 * provider itself.
	 */
	clk_set_defaults(dev, 1);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};