blob: 93cb490eb5372b8eb5aa094288e979144ab5c047 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */
8
9#include <common.h>
10#include <clk.h>
Stephen Warren135aa952016-06-17 09:44:00 -060011#include <clk-uclass.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060012#include <dm.h>
Philipp Tomsichf4fcba52018-01-08 13:59:18 +010013#include <dm/read.h>
Simon Glass7423daa2016-07-04 11:58:03 -060014#include <dt-structs.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060015#include <errno.h>
Lukasz Majewski0c660c22019-06-24 15:50:42 +020016#include <linux/clk-provider.h>
Simon Glassf26c8a82015-06-23 15:39:15 -060017
Mario Six268453b2018-01-15 11:06:51 +010018static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
Simon Glassf26c8a82015-06-23 15:39:15 -060019{
Mario Six268453b2018-01-15 11:06:51 +010020 return (const struct clk_ops *)dev->driver->ops;
Simon Glassf26c8a82015-06-23 15:39:15 -060021}
22
Simon Glasse70cc432016-01-20 19:43:02 -070023#if CONFIG_IS_ENABLED(OF_CONTROL)
Simon Glass7423daa2016-07-04 11:58:03 -060024# if CONFIG_IS_ENABLED(OF_PLATDATA)
25int clk_get_by_index_platdata(struct udevice *dev, int index,
Simon Glass0d154632017-08-29 14:15:56 -060026 struct phandle_1_arg *cells, struct clk *clk)
Simon Glass7423daa2016-07-04 11:58:03 -060027{
28 int ret;
29
30 if (index != 0)
31 return -ENOSYS;
32 ret = uclass_get_device(UCLASS_CLK, 0, &clk->dev);
33 if (ret)
34 return ret;
Simon Glassbc796172017-08-29 14:15:58 -060035 clk->id = cells[0].arg[0];
Simon Glass7423daa2016-07-04 11:58:03 -060036
37 return 0;
38}
39# else
Stephen Warren135aa952016-06-17 09:44:00 -060040static int clk_of_xlate_default(struct clk *clk,
Simon Glassa4e0ef52017-05-18 20:09:40 -060041 struct ofnode_phandle_args *args)
Stephen Warren135aa952016-06-17 09:44:00 -060042{
43 debug("%s(clk=%p)\n", __func__, clk);
44
45 if (args->args_count > 1) {
46 debug("Invaild args_count: %d\n", args->args_count);
47 return -EINVAL;
48 }
49
50 if (args->args_count)
51 clk->id = args->args[0];
52 else
53 clk->id = 0;
54
Sekhar Norie497fab2019-07-11 14:30:24 +053055 clk->data = 0;
56
Stephen Warren135aa952016-06-17 09:44:00 -060057 return 0;
58}
59
/*
 * clk_get_by_index_tail() - finish resolving a parsed phandle into *clk
 * @ret: result of the earlier phandle parse (0 on success)
 * @node: node whose @list_name property was parsed (error reporting only)
 * @args: parsed phandle plus argument cells identifying the clock
 * @list_name: property name that was parsed (error reporting only)
 * @index: index within the property (error reporting only)
 * @clk: output clock handle; ->dev is NULL on failure
 *
 * Probes the provider device behind the phandle, translates the
 * specifier cells via the provider's of_xlate op (or the default
 * one-cell translation), then requests the clock from the provider.
 */
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	/* Find (and probe, if needed) the provider device */
	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	/* Let the provider decode its specifier cells into id/data */
	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return ret;
}
99
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100100static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
101 int index, struct clk *clk)
Stephen Warren135aa952016-06-17 09:44:00 -0600102{
103 int ret;
Simon Glassaa9bb092017-05-30 21:47:29 -0600104 struct ofnode_phandle_args args;
Stephen Warren135aa952016-06-17 09:44:00 -0600105
106 debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);
107
108 assert(clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200109 clk->dev = NULL;
110
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100111 ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
Mario Six268453b2018-01-15 11:06:51 +0100112 index, &args);
Simon Glasse70cc432016-01-20 19:43:02 -0700113 if (ret) {
114 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
115 __func__, ret);
116 return ret;
117 }
118
Wenyou Yang3f56b132016-09-27 11:00:28 +0800119
Jagan Tekidcb63fc2019-02-28 00:26:53 +0530120 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
121 index > 0, clk);
Stephen Warren135aa952016-06-17 09:44:00 -0600122}
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100123
124int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
125{
Jagan Teki75f98312019-02-28 00:26:52 +0530126 struct ofnode_phandle_args args;
127 int ret;
128
129 ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
130 index, &args);
131
132 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
133 index > 0, clk);
134}
135
136int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
137{
138 struct ofnode_phandle_args args;
139 int ret;
140
141 ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
142 index > 0, &args);
143
144 return clk_get_by_index_tail(ret, node, &args, "clocks",
145 index > 0, clk);
Philipp Tomsich95f9a7e2018-01-08 11:18:18 +0100146}
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100147
Neil Armstronga855be82018-04-03 11:44:18 +0200148int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
149{
150 int i, ret, err, count;
151
152 bulk->count = 0;
153
154 count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
Neil Armstrong721881c2018-04-17 11:30:31 +0200155 if (count < 1)
156 return count;
Neil Armstronga855be82018-04-03 11:44:18 +0200157
158 bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
159 if (!bulk->clks)
160 return -ENOMEM;
161
162 for (i = 0; i < count; i++) {
163 ret = clk_get_by_index(dev, i, &bulk->clks[i]);
164 if (ret < 0)
165 goto bulk_get_err;
166
167 ++bulk->count;
168 }
169
170 return 0;
171
172bulk_get_err:
173 err = clk_release_all(bulk->clks, bulk->count);
174 if (err)
175 debug("%s: could release all clocks for %p\n",
176 __func__, dev);
177
178 return ret;
179}
180
/*
 * clk_set_default_parents() - apply the 'assigned-clock-parents' DT property
 * @dev: device whose node is examined
 * @stage: 0 = before @dev is probed, >0 = after (see clk_set_defaults())
 *
 * Reparents each 'assigned-clocks' entry to the parent at the same index
 * in 'assigned-clock-parents'. An absent property is not an error.
 * Return: 0 on success or when nothing to do, -ve error code otherwise.
 */
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells");
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * It cannot be done right now; it must wait until after the
		 * device is probed (handled again at stage 1).
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not setup twice the parent clocks */
			continue;

		ret = clk_set_parent(&clk, &parent_clk);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}
245
/*
 * clk_set_default_rates() - apply the 'assigned-clock-rates' DT property
 * @dev: device whose node is examined
 * @stage: 0 = before @dev is probed, >0 = after (see clk_set_defaults())
 *
 * Sets each 'assigned-clocks' entry to the rate at the same index in
 * 'assigned-clock-rates'. A zero rate is a no-op entry; an absent
 * property is not an error.
 * Return: 0 on success or when nothing to do, -ve error code otherwise.
 */
static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * This is a clk provider device trying to program itself.
		 * It cannot be done right now; it must wait until after the
		 * device is probed (handled again at stage 1).
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not setup twice the parent clocks */
			continue;

		ret = clk_set_rate(&clk, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}
305
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200306int clk_set_defaults(struct udevice *dev, int stage)
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100307{
308 int ret;
309
Peng Fan91944ef2019-07-31 07:01:49 +0000310 if (!dev_of_valid(dev))
311 return 0;
312
Philipp Tomsich291da962018-11-26 20:20:19 +0100313 /* If this not in SPL and pre-reloc state, don't take any action. */
314 if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
315 return 0;
316
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100317 debug("%s(%s)\n", __func__, dev_read_name(dev));
318
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200319 ret = clk_set_default_parents(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100320 if (ret)
321 return ret;
322
Jean-Jacques Hiblotfd1ba292019-10-22 14:00:06 +0200323 ret = clk_set_default_rates(dev, stage);
Philipp Tomsichf4fcba52018-01-08 13:59:18 +0100324 if (ret < 0)
325 return ret;
326
327 return 0;
328}
Michal Simek9e0758b2016-07-14 13:11:37 +0200329# endif /* OF_PLATDATA */
Stephen Warren135aa952016-06-17 09:44:00 -0600330
331int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
332{
333 int index;
334
335 debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
Patrice Chotard82a8a662017-07-18 11:57:07 +0200336 clk->dev = NULL;
Stephen Warren135aa952016-06-17 09:44:00 -0600337
Simon Glassaa9bb092017-05-30 21:47:29 -0600338 index = dev_read_stringlist_search(dev, "clock-names", name);
Stephen Warren135aa952016-06-17 09:44:00 -0600339 if (index < 0) {
Simon Glassb02e4042016-10-02 17:59:28 -0600340 debug("fdt_stringlist_search() failed: %d\n", index);
Stephen Warren135aa952016-06-17 09:44:00 -0600341 return index;
342 }
343
344 return clk_get_by_index(dev, index, clk);
Simon Glasse70cc432016-01-20 19:43:02 -0700345}
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200346
Chunfeng Yund6464202020-01-09 11:35:07 +0800347int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
348{
349 int index;
350
351 debug("%s(node=%p, name=%s, clk=%p)\n", __func__,
352 ofnode_get_name(node), name, clk);
353 clk->dev = NULL;
354
355 index = ofnode_stringlist_search(node, "clock-names", name);
356 if (index < 0) {
357 debug("fdt_stringlist_search() failed: %d\n", index);
358 return index;
359 }
360
361 return clk_get_by_index_nodev(node, index, clk);
362}
363
364int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
365{
366 int ret;
367
368 ret = clk_get_by_name_nodev(node, name, clk);
369 if (ret == -ENODATA)
370 return 0;
371
372 return ret;
373}
374
Patrice Chotardb108d8a2017-07-25 13:24:45 +0200375int clk_release_all(struct clk *clk, int count)
376{
377 int i, ret;
378
379 for (i = 0; i < count; i++) {
380 debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);
381
382 /* check if clock has been previously requested */
383 if (!clk[i].dev)
384 continue;
385
386 ret = clk_disable(&clk[i]);
387 if (ret && ret != -ENOSYS)
388 return ret;
389
390 ret = clk_free(&clk[i]);
391 if (ret && ret != -ENOSYS)
392 return ret;
393 }
394
395 return 0;
396}
397
Simon Glass7423daa2016-07-04 11:58:03 -0600398#endif /* OF_CONTROL */
Stephen Warren135aa952016-06-17 09:44:00 -0600399
400int clk_request(struct udevice *dev, struct clk *clk)
401{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200402 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600403
404 debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200405 if (!clk)
406 return 0;
407 ops = clk_dev_ops(dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600408
409 clk->dev = dev;
410
411 if (!ops->request)
412 return 0;
413
414 return ops->request(clk);
415}
416
417int clk_free(struct clk *clk)
418{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200419 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600420
421 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800422 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200423 return 0;
424 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600425
426 if (!ops->free)
427 return 0;
428
429 return ops->free(clk);
430}
431
432ulong clk_get_rate(struct clk *clk)
433{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200434 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600435
436 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800437 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200438 return 0;
439 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600440
441 if (!ops->get_rate)
442 return -ENOSYS;
443
444 return ops->get_rate(clk);
445}
446
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200447struct clk *clk_get_parent(struct clk *clk)
448{
449 struct udevice *pdev;
450 struct clk *pclk;
451
452 debug("%s(clk=%p)\n", __func__, clk);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800453 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200454 return NULL;
Lukasz Majewski0c660c22019-06-24 15:50:42 +0200455
456 pdev = dev_get_parent(clk->dev);
457 pclk = dev_get_clk_ptr(pdev);
458 if (!pclk)
459 return ERR_PTR(-ENODEV);
460
461 return pclk;
462}
463
/*
 * clk_get_parent_rate() - query (and cache) the rate of @clk's parent
 *
 * Return: parent rate in Hz, 0 for an invalid handle, -ENODEV when no
 * parent clock exists, or -ENOSYS when the parent cannot report a rate.
 */
long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already cached, or if caching is disabled */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}
487
Stephen Warren135aa952016-06-17 09:44:00 -0600488ulong clk_set_rate(struct clk *clk, ulong rate)
489{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200490 const struct clk_ops *ops;
Stephen Warren135aa952016-06-17 09:44:00 -0600491
492 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800493 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200494 return 0;
495 ops = clk_dev_ops(clk->dev);
Stephen Warren135aa952016-06-17 09:44:00 -0600496
497 if (!ops->set_rate)
498 return -ENOSYS;
499
500 return ops->set_rate(clk, rate);
501}
502
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100503int clk_set_parent(struct clk *clk, struct clk *parent)
504{
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200505 const struct clk_ops *ops;
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100506
507 debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
Chunfeng Yunbd7c7982020-01-09 11:35:06 +0800508 if (!clk_valid(clk))
Jean-Jacques Hiblot8a1661f2019-10-22 14:00:03 +0200509 return 0;
510 ops = clk_dev_ops(clk->dev);
Philipp Tomsichf7d10462018-01-08 11:15:08 +0100511
512 if (!ops->set_parent)
513 return -ENOSYS;
514
515 return ops->set_parent(clk, parent);
516}
517
/*
 * clk_enable() - enable a clock, with CCF reference counting when enabled
 *
 * With CLK_CCF: maintains an enable_count on the provider's own struct
 * clk and recursively enables the parent clock before the first enable.
 * Without CLK_CCF: simply dispatches to the provider's enable op.
 * Return: 0 on success, -ENOSYS (non-CCF, missing op), or -ve error.
 */
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;	/* CCF bookkeeping clock, if found */
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Already enabled: only bump the refcount */
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			/* First enable: recursively enable the parent first */
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		/* Count the enable only after the hardware op succeeded */
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}
564
Neil Armstronga855be82018-04-03 11:44:18 +0200565int clk_enable_bulk(struct clk_bulk *bulk)
566{
567 int i, ret;
568
569 for (i = 0; i < bulk->count; i++) {
570 ret = clk_enable(&bulk->clks[i]);
571 if (ret < 0 && ret != -ENOSYS)
572 return ret;
573 }
574
575 return 0;
576}
577
/*
 * clk_disable() - disable a clock, with CCF reference counting when enabled
 *
 * With CLK_CCF: decrements the provider clock's enable_count and only
 * touches hardware (and recursively disables the parent) when the last
 * user goes away; disabling an already-disabled clock is reported but
 * treated as success.
 * Without CLK_CCF: simply dispatches to the provider's disable op.
 * Return: 0 on success, -ENOSYS (non-CCF, missing op), or -ve error.
 */
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;	/* CCF bookkeeping clock, if found */
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Unbalanced disable: warn but do not fail */
			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			/* Other users remain: only drop the refcount */
			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		/* Last user gone: recursively disable the parent clock */
		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}
Simon Glasse70cc432016-01-20 19:43:02 -0700625
Neil Armstronga855be82018-04-03 11:44:18 +0200626int clk_disable_bulk(struct clk_bulk *bulk)
627{
628 int i, ret;
629
630 for (i = 0; i < bulk->count; i++) {
631 ret = clk_disable(&bulk->clks[i]);
632 if (ret < 0 && ret != -ENOSYS)
633 return ret;
634 }
635
636 return 0;
637}
638
Lukasz Majewski2796af72019-06-24 15:50:44 +0200639int clk_get_by_id(ulong id, struct clk **clkp)
640{
641 struct udevice *dev;
642 struct uclass *uc;
643 int ret;
644
645 ret = uclass_get(UCLASS_CLK, &uc);
646 if (ret)
647 return ret;
648
649 uclass_foreach_dev(dev, uc) {
650 struct clk *clk = dev_get_clk_ptr(dev);
651
652 if (clk && clk->id == id) {
653 *clkp = clk;
654 return 0;
655 }
656 }
657
658 return -ENOENT;
659}
660
/*
 * clk_is_match() - check whether two clk handles refer to the same clock
 *
 * Return: true when both handles are identical pointers (including both
 * NULL) or name the same (dev, id, data) triple; false otherwise.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}
677
/* devres destructor: free the clock when the owning device is removed */
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}
682
/* devres match callback: identify the managed resource by its pointer */
static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}
687
688struct clk *devm_clk_get(struct udevice *dev, const char *id)
689{
690 int rc;
691 struct clk *clk;
692
693 clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
694 if (unlikely(!clk))
695 return ERR_PTR(-ENOMEM);
696
697 rc = clk_get_by_name(dev, id, clk);
698 if (rc)
699 return ERR_PTR(rc);
700
701 devres_add(dev, clk);
702 return clk;
703}
704
705struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
706{
707 struct clk *clk = devm_clk_get(dev, id);
708
Chunfeng Yun0f9b2b32020-01-09 11:35:05 +0800709 if (PTR_ERR(clk) == -ENODATA)
Jean-Jacques Hiblot52720c52019-10-22 14:00:04 +0200710 return NULL;
711
712 return clk;
713}
714
715void devm_clk_put(struct udevice *dev, struct clk *clk)
716{
717 int rc;
718
719 if (!clk)
720 return;
721
722 rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
723 WARN_ON(rc);
724}
725
/*
 * clk_uclass_post_probe() - uclass hook run after a clock provider probes
 *
 * Runs clk_set_defaults() a second time (stage 1) once the provider is
 * probed. This handles providers whose own node uses assigned-clocks to
 * set default parents and rates on themselves — entries that had to be
 * skipped during the stage-0 (pre-probe) pass.
 */
int clk_uclass_post_probe(struct udevice *dev)
{
	clk_set_defaults(dev, 1);

	return 0;
}
738
/* Clock uclass: groups all clock provider devices under UCLASS_CLK */
UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};