// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

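/* Fetch the clk_ops provided by the driver bound to a clock provider device */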
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

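/*
 * A minimal consumer-side sketch of this API (assuming the consumer's DT
 * node carries "clocks" and "clock-names" properties naming a "per" clock;
 * the clock name and rate below are illustrative only):
 *
 *	struct clk clk;
 *	int ret;
 *
 *	ret = clk_get_by_name(dev, "per", &clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(&clk);
 *	if (ret)
 *		return ret;
 *	clk_set_rate(&clk, 48000000);
 */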
#if CONFIG_IS_ENABLED(OF_CONTROL)
# if CONFIG_IS_ENABLED(OF_PLATDATA)
int clk_get_by_index_platdata(struct udevice *dev, int index,
			      struct phandle_1_arg *cells, struct clk *clk)
{
	int ret;

	if (index != 0)
		return -ENOSYS;
	ret = uclass_get_device(UCLASS_CLK, 0, &clk->dev);
	if (ret)
		return ret;
	clk->id = cells[0].arg[0];

	return 0;
}
# else
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

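/*
 * Common tail for the clk_get_by_index*() lookups: given the result of a
 * phandle parse, resolve the provider device, translate the clock specifier
 * (via the driver's of_xlate or the default translation) and request the
 * clock from its provider.
 */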
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return ret;
}

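/*
 * Look up entry 'index' of an arbitrary clock phandle list property such as
 * "assigned-clocks" or "assigned-clock-parents" on the consumer device.
 */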
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

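/*
 * Walk the "assigned-clock-parents" property of a device node and reparent
 * each corresponding entry of "assigned-clocks". Clocks provided by the
 * device itself are deferred to stage > 0 (after the provider is probed),
 * and are the only ones handled there, so no clock is reparented twice.
 */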
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells");
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * It cannot be done right now; it must wait until after the
		 * device is probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		ret = clk_set_parent(&clk, &parent_clk);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

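/*
 * Apply the "assigned-clock-rates" property of a device node to the matching
 * entries of "assigned-clocks", using the same two-stage handling as
 * clk_set_default_parents() for clocks the device provides itself.
 */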
static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * This is a clk provider device trying to program itself.
		 * It cannot be done right now; it must wait until after the
		 * device is probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set the clock rate twice */
			continue;

		ret = clk_set_rate(&clk, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

int clk_set_defaults(struct udevice *dev, int stage)
{
	int ret;

	if (!dev_of_valid(dev))
		return 0;

	/* If this is neither SPL nor post-relocation, don't take any action */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}

int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}
# endif /* OF_PLATDATA */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
		debug("ofnode_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}

int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
	int ret;

	ret = clk_get_by_name_nodev(node, name, clk);
	if (ret == -ENODATA)
		return 0;

	return ret;
}

int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

#endif /* OF_CONTROL */

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->rfree)
		return 0;

	return ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	return ops->get_rate(clk);
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already set, or if CLK_GET_RATE_NOCACHE is set */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	return ops->set_parent(clk, parent);
}

int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

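	/*
	 * With the common clock framework (CLK_CCF) enabled, maintain an
	 * enable count per clock and propagate the enable to the parent
	 * provider; otherwise just call the driver's enable op directly.
	 */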
	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Treat id 0 as an invalid clk, e.g. a dummy clock */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

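/*
 * devres callbacks backing devm_clk_get(): the release hook frees the clock
 * when the consumer device is removed, and the match hook identifies the
 * resource for devm_clk_put().
 */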
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, clk);
	return clk;
}

struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENODATA)
		return NULL;

	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * Call clk_set_defaults() again after the clock provider itself has
	 * been probed. This takes care of the case where the DT uses
	 * assigned-clocks to set up default parents and rates on the
	 * provider's own clocks.
	 */
	clk_set_defaults(dev, 1);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};