blob: 71878474ebe1a1648fcca53dac2228611c955760 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */
8
9#include <common.h>
10#include <clk.h>
Stephen Warren135aa952016-06-17 09:44:00 -060011#include <clk-uclass.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060012#include <dm.h>
Simon Glass7423daa2016-07-04 11:58:03 -060013#include <dt-structs.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060014#include <errno.h>
Simon Glass336d4612020-02-03 07:36:16 -070015#include <malloc.h>
Simon Glass61b29b82020-02-03 07:36:15 -070016#include <dm/devres.h>
17#include <dm/read.h>
Lukasz Majewski0c660c22019-06-24 15:50:42 +020018#include <linux/clk-provider.h>
Simon Glass61b29b82020-02-03 07:36:15 -070019#include <linux/err.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060020
/* Return the clk_ops table registered by @dev's driver. */
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}
25
Simon Glasse70cc432016-01-20 19:43:02 -070026#if CONFIG_IS_ENABLED(OF_CONTROL)
Simon Glass7423daa2016-07-04 11:58:03 -060027# if CONFIG_IS_ENABLED(OF_PLATDATA)
28int clk_get_by_index_platdata(struct udevice *dev, int index,
Simon Glass0d154632017-08-29 14:15:56 -060029 struct phandle_1_arg *cells, struct clk *clk)
Simon Glass7423daa2016-07-04 11:58:03 -060030{
31 int ret;
32
33 if (index != 0)
34 return -ENOSYS;
35 ret = uclass_get_device(UCLASS_CLK, 0, &clk->dev);
36 if (ret)
37 return ret;
Simon Glassbc796172017-08-29 14:15:58 -060038 clk->id = cells[0].arg[0];
Simon Glass7423daa2016-07-04 11:58:03 -060039
40 return 0;
41}
42# else
Stephen Warren135aa952016-06-17 09:44:00 -060043static int clk_of_xlate_default(struct clk *clk,
Simon Glassa4e0ef52017-05-18 20:09:40 -060044 struct ofnode_phandle_args *args)
Stephen Warren135aa952016-06-17 09:44:00 -060045{
46 debug("%s(clk=%p)\n", __func__, clk);
47
48 if (args->args_count > 1) {
49 debug("Invaild args_count: %d\n", args->args_count);
50 return -EINVAL;
51 }
52
53 if (args->args_count)
54 clk->id = args->args[0];
55 else
56 clk->id = 0;
57
Sekhar Norie497fab2019-07-11 14:30:24 +053058 clk->data = 0;
59
Stephen Warren135aa952016-06-17 09:44:00 -060060 return 0;
61}
62
/*
 * clk_get_by_index_tail() - finish a phandle-based clock lookup
 *
 * Shared tail for clk_get_by_index() and friends.  @ret is the result of
 * the caller's phandle parse; @node, @list_name and @index are used only
 * for the error message.  On success the handle is bound to its provider
 * device, translated via of_xlate (or the default) and requested.
 */
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	/* Propagate a failed phandle parse from the caller */
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	/* Let the provider decode the cells, or fall back to the default */
	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return ret;
}
102
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100103static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
104 int index, struct clk *clk)
Stephen Warren135aa952016-06-17 09:44:00 -0600105{
106 int ret;
Simon Glassaa9bb092017-05-30 21:47:29 -0600107 struct ofnode_phandle_args args;
Stephen Warren135aa952016-06-17 09:44:00 -0600108
109 debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);
110
111 assert(clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200112 clk->dev = NULL;
113
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100114 ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
Mario Six268453b2018-01-15 11:06:51 +0100115 index, &args);
Simon Glasse70cc432016-01-20 19:43:02 -0700116 if (ret) {
117 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
118 __func__, ret);
119 return ret;
120 }
121
Wenyou Yang3f56b132016-09-27 11:00:28 +0800122
Jagan Tekidcb63fc2019-02-28 00:26:53 +0530123 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
124 index > 0, clk);
Stephen Warren135aa952016-06-17 09:44:00 -0600125}
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100126
127int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
128{
Jagan Teki75f98312019-02-28 00:26:52 +0530129 struct ofnode_phandle_args args;
130 int ret;
131
132 ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
133 index, &args);
134
135 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
136 index > 0, clk);
137}
138
139int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
140{
141 struct ofnode_phandle_args args;
142 int ret;
143
144 ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
145 index > 0, &args);
146
147 return clk_get_by_index_tail(ret, node, &args, "clocks",
148 index > 0, clk);
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100149}
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100150
Neil Armstronga855be82018-04-03 11:44:18 +0200151int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
152{
153 int i, ret, err, count;
154
155 bulk->count = 0;
156
157 count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
Neil Armstrong721881c2018-04-17 11:30:31 +0200158 if (count < 1)
159 return count;
Neil Armstronga855be82018-04-03 11:44:18 +0200160
161 bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
162 if (!bulk->clks)
163 return -ENOMEM;
164
165 for (i = 0; i < count; i++) {
166 ret = clk_get_by_index(dev, i, &bulk->clks[i]);
167 if (ret < 0)
168 goto bulk_get_err;
169
170 ++bulk->count;
171 }
172
173 return 0;
174
175bulk_get_err:
176 err = clk_release_all(bulk->clks, bulk->count);
177 if (err)
178 debug("%s: could release all clocks for %p\n",
179 __func__, dev);
180
181 return ret;
182}
183
/*
 * clk_set_default_parents() - apply DT "assigned-clock-parents"
 *
 * @dev:   device whose node carries the assigned-clocks properties
 * @stage: 0 = before @dev is probed, >0 = after (see clk_set_defaults())
 *
 * Reparents each entry of "assigned-clocks" to the matching entry of
 * "assigned-clock-parents".  -ENOSYS from the provider (reparenting not
 * implemented) is silently ignored.
 */
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells");
	if (num_parents < 0) {
		/* Property absent: nothing to do */
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * It cannot be done right now but needs to wait until the
		 * device has been probed (handled by the stage-1 pass).
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not setup twice the parent clocks */
			continue;

		ret = clk_set_parent(&clk, &parent_clk);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}
248
/*
 * clk_set_default_rates() - apply DT "assigned-clock-rates"
 *
 * @dev:   device whose node carries the assigned-clocks properties
 * @stage: 0 = before @dev is probed, >0 = after (see clk_set_defaults())
 *
 * Sets each entry of "assigned-clocks" to the matching rate in
 * "assigned-clock-rates".  A rate of 0 is a no-op entry; a clock that
 * cannot be looked up is skipped rather than treated as fatal.
 */
static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * This is a clk provider device trying to program itself.
		 * It cannot be done right now but needs to wait until the
		 * device has been probed (handled by the stage-1 pass).
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not setup twice the parent clocks */
			continue;

		ret = clk_set_rate(&clk, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}
308
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200309int clk_set_defaults(struct udevice *dev, int stage)
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100310{
311 int ret;
312
Peng Fan91944ef2019-07-31 07:01:49 +0000313 if (!dev_of_valid(dev))
314 return 0;
315
Philipp Tomsich291da962018-11-26 20:20:19 +0100316 /* If this not in SPL and pre-reloc state, don't take any action. */
317 if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
318 return 0;
319
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100320 debug("%s(%s)\n", __func__, dev_read_name(dev));
321
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200322 ret = clk_set_default_parents(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100323 if (ret)
324 return ret;
325
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200326 ret = clk_set_default_rates(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100327 if (ret < 0)
328 return ret;
329
330 return 0;
331}
Stephen Warren135aa952016-06-17 09:44:00 -0600332
333int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
334{
335 int index;
336
337 debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200338 clk->dev = NULL;
Stephen Warren135aa952016-06-17 09:44:00 -0600339
Simon Glassaa9bb092017-05-30 21:47:29 -0600340 index = dev_read_stringlist_search(dev, "clock-names", name);
Stephen Warren135aa952016-06-17 09:44:00 -0600341 if (index < 0) {
Simon Glassb02e4042016-10-02 17:59:28 -0600342 debug("fdt_stringlist_search() failed: %d\n", index);
Stephen Warren135aa952016-06-17 09:44:00 -0600343 return index;
344 }
345
346 return clk_get_by_index(dev, index, clk);
Simon Glasse70cc432016-01-20 19:43:02 -0700347}
Giulio Benettiefbdad32019-12-12 23:53:19 +0100348# endif /* OF_PLATDATA */
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200349
Chunfeng Yund6464202020-01-09 11:35:07 +0800350int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
351{
352 int index;
353
354 debug("%s(node=%p, name=%s, clk=%p)\n", __func__,
355 ofnode_get_name(node), name, clk);
356 clk->dev = NULL;
357
358 index = ofnode_stringlist_search(node, "clock-names", name);
359 if (index < 0) {
360 debug("fdt_stringlist_search() failed: %d\n", index);
361 return index;
362 }
363
364 return clk_get_by_index_nodev(node, index, clk);
365}
366
367int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
368{
369 int ret;
370
371 ret = clk_get_by_name_nodev(node, name, clk);
372 if (ret == -ENODATA)
373 return 0;
374
375 return ret;
376}
377
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200378int clk_release_all(struct clk *clk, int count)
379{
380 int i, ret;
381
382 for (i = 0; i < count; i++) {
383 debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);
384
385 /* check if clock has been previously requested */
386 if (!clk[i].dev)
387 continue;
388
389 ret = clk_disable(&clk[i]);
390 if (ret && ret != -ENOSYS)
391 return ret;
392
393 ret = clk_free(&clk[i]);
394 if (ret && ret != -ENOSYS)
395 return ret;
396 }
397
398 return 0;
399}
400
Simon Glass7423daa2016-07-04 11:58:03 -0600401#endif /* OF_CONTROL */
Stephen Warren135aa952016-06-17 09:44:00 -0600402
403int clk_request(struct udevice *dev, struct clk *clk)
404{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200405 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600406
407 debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200408 if (!clk)
409 return 0;
410 ops = clk_dev_ops(dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600411
412 clk->dev = dev;
413
414 if (!ops->request)
415 return 0;
416
417 return ops->request(clk);
418}
419
420int clk_free(struct clk *clk)
421{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200422 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600423
424 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800425 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200426 return 0;
427 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600428
Simon Glassfb8c0d52020-02-03 07:35:54 -0700429 if (!ops->rfree)
Stephen Warren135aa952016-06-17 09:44:00 -0600430 return 0;
431
Simon Glassfb8c0d52020-02-03 07:35:54 -0700432 return ops->rfree(clk);
Stephen Warren135aa952016-06-17 09:44:00 -0600433}
434
435ulong clk_get_rate(struct clk *clk)
436{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200437 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600438
439 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800440 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200441 return 0;
442 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600443
444 if (!ops->get_rate)
445 return -ENOSYS;
446
447 return ops->get_rate(clk);
448}
449
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200450struct clk *clk_get_parent(struct clk *clk)
451{
452 struct udevice *pdev;
453 struct clk *pclk;
454
455 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800456 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200457 return NULL;
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200458
459 pdev = dev_get_parent(clk->dev);
460 pclk = dev_get_clk_ptr(pdev);
461 if (!pclk)
462 return ERR_PTR(-ENODEV);
463
464 return pclk;
465}
466
/*
 * clk_get_parent_rate() - return the rate of @clk's parent in Hz.
 *
 * The parent's rate is cached in pclk->rate; it is (re)read only when
 * unset or when the parent carries CLK_GET_RATE_NOCACHE.  Returns 0 for
 * an invalid handle, -ENODEV/-ENOSYS on lookup/op failure.
 */
long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already set or if the no-cache flag is set */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}
490
Stephen Warren135aa952016-06-17 09:44:00 -0600491ulong clk_set_rate(struct clk *clk, ulong rate)
492{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200493 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600494
495 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800496 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200497 return 0;
498 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600499
500 if (!ops->set_rate)
501 return -ENOSYS;
502
503 return ops->set_rate(clk, rate);
504}
505
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100506int clk_set_parent(struct clk *clk, struct clk *parent)
507{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200508 const struct clk_ops *ops;
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100509
510 debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800511 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200512 return 0;
513 ops = clk_dev_ops(clk->dev);
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100514
515 if (!ops->set_parent)
516 return -ENOSYS;
517
518 return ops->set_parent(clk, parent);
519}
520
/*
 * clk_enable() - enable (ungate) a clock.
 *
 * With CLK_CCF enabled this also maintains an enable refcount on the
 * canonical clk (looked up by ID) and recursively enables the parent
 * first.  Without CCF it simply calls the provider's enable op.
 */
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Already enabled: just bump the refcount */
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			/*
			 * First enable: bring up the parent clock first.
			 * NOTE(review): the uclass check is done on clkp->dev,
			 * not on the parent — confirm this is intended.
			 */
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		/* Count the enable on the canonical clk, if we found one */
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}
567
Neil Armstronga855be82018-04-03 11:44:18 +0200568int clk_enable_bulk(struct clk_bulk *bulk)
569{
570 int i, ret;
571
572 for (i = 0; i < bulk->count; i++) {
573 ret = clk_enable(&bulk->clks[i]);
574 if (ret < 0 && ret != -ENOSYS)
575 return ret;
576 }
577
578 return 0;
579}
580
/*
 * clk_disable() - disable (gate) a clock.
 *
 * With CLK_CCF enabled this maintains the enable refcount on the
 * canonical clk (looked up by ID): the hardware is only gated when the
 * count drops to zero, and the parent is then disabled recursively.
 * Without CCF it simply calls the provider's disable op.
 */
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Unbalanced disable: warn but do not go negative */
			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			/* Other users remain: just drop the refcount */
			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		/* Last user gone: propagate the disable to the parent */
		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}
Simon Glasse70cc432016-01-20 19:43:02 -0700628
Neil Armstronga855be82018-04-03 11:44:18 +0200629int clk_disable_bulk(struct clk_bulk *bulk)
630{
631 int i, ret;
632
633 for (i = 0; i < bulk->count; i++) {
634 ret = clk_disable(&bulk->clks[i]);
635 if (ret < 0 && ret != -ENOSYS)
636 return ret;
637 }
638
639 return 0;
640}
641
Lukasz Majewski2796af72019-06-24 15:50:44 +0200642int clk_get_by_id(ulong id, struct clk **clkp)
643{
644 struct udevice *dev;
645 struct uclass *uc;
646 int ret;
647
648 ret = uclass_get(UCLASS_CLK, &uc);
649 if (ret)
650 return ret;
651
652 uclass_foreach_dev(dev, uc) {
653 struct clk *clk = dev_get_clk_ptr(dev);
654
655 if (clk && clk->id == id) {
656 *clkp = clk;
657 return 0;
658 }
659 }
660
661 return -ENOENT;
662}
663
/*
 * clk_is_match() - check whether two clk handles refer to the same clock.
 * True when the pointers are identical (or both NULL), or when device,
 * ID and data all match.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}
680
/* devres destructor: free the clock when the owning device is removed */
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}
685
/* devres match callback: true when @res is exactly the clk pointer @data */
static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}
690
691struct clk *devm_clk_get(struct udevice *dev, const char *id)
692{
693 int rc;
694 struct clk *clk;
695
696 clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
697 if (unlikely(!clk))
698 return ERR_PTR(-ENOMEM);
699
700 rc = clk_get_by_name(dev, id, clk);
701 if (rc)
702 return ERR_PTR(rc);
703
704 devres_add(dev, clk);
705 return clk;
706}
707
708struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
709{
710 struct clk *clk = devm_clk_get(dev, id);
711
Chunfeng Yun0f9b2b32020-01-09 11:35:05 +0800712 if (PTR_ERR(clk) == -ENODATA)
Jean-Jacques Hiblot52720c52019-10-22 14:00:04 +0200713 return NULL;
714
715 return clk;
716}
717
718void devm_clk_put(struct udevice *dev, struct clk *clk)
719{
720 int rc;
721
722 if (!clk)
723 return;
724
725 rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
726 WARN_ON(rc);
727}
728
int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * When a clock provider is probed, call clk_set_defaults() again
	 * (stage 1) so that DT defaults — parents and rates set via
	 * assigned-clocks on the provider's own node — can now be applied.
	 */
	clk_set_defaults(dev, 1);

	return 0;
}
741
/* clk uclass: runs the stage-1 defaults pass after each provider probes */
UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};