// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments K3 clock driver
 *
 * Copyright (C) 2020-2021 Texas Instruments Incorporated - http://www.ti.com/
 *	Tero Kristo <t-kristo@ti.com>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <soc.h>
#include <clk-uclass.h>
#include "k3-clk.h"

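/* PLL output limits used when a rate request is propagated up to a PLL */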
#define PLL_MIN_FREQ	800000000
#define PLL_MAX_FREQ	3200000000UL
#define PLL_MAX_DIV	127

/**
 * struct clk_map - mapping from dev/clk ID tuples to physical clocks
 * @dev_id: device ID for the clock
 * @clk_id: clock ID for the clock
 * @clk: pointer to the registered clock entry for the mapping
 */
struct clk_map {
	u16 dev_id;
	u32 clk_id;
	struct clk *clk;
};

/**
 * struct ti_clk_data - clock controller information structure
 * @map: mapping from dev/clk id tuples to physical clock entries
 * @size: number of entries in the map
 */
struct ti_clk_data {
	struct clk_map *map;
	int size;
};

/* Rate of the PLL reference oscillator, cached from the first PLL's parent */
static ulong osc_freq;

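/*
 * clk_add_map() - append a dev_id/clk_id -> clk entry to the lookup map
 *
 * NULL clocks are ignored so that callers may pass the result of a failed
 * registration without additional checks.
 */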
static void clk_add_map(struct ti_clk_data *data, struct clk *clk,
			u32 dev_id, u32 clk_id)
{
	struct clk_map *map;

	debug("%s: added clk=%p, data=%p, dev=%d, clk=%d\n", __func__,
	      clk, data, dev_id, clk_id);
	if (!clk)
		return;

	map = data->map + data->size++;

	map->dev_id = dev_id;
	map->clk_id = clk_id;
	map->clk = clk;
}

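/*
 * Per-SoC clock tree descriptions, selected at probe time via
 * soc_device_match().
 */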
static const struct soc_attr ti_k3_soc_clk_data[] = {
#if IS_ENABLED(CONFIG_SOC_K3_J721E)
	{
		.family = "J721E",
		.data = &j721e_clk_platdata,
	},
	{
		.family = "J7200",
		.data = &j7200_clk_platdata,
	},
#elif IS_ENABLED(CONFIG_SOC_K3_J721S2)
	{
		.family = "J721S2",
		.data = &j721s2_clk_platdata,
	},
#endif
#ifdef CONFIG_SOC_K3_AM625
	{
		.family = "AM62X",
		.data = &am62x_clk_platdata,
	},
#endif
	{ /* sentinel */ }
};

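/*
 * ti_clk_probe() - register the clocks described by the matched SoC data
 *
 * Walks the static clock list, registers each fixed-rate, divider, mux and
 * PLL clock, and records dev_id/clk_id mappings so that device tree clock
 * specifiers can later be translated to the registered clocks.
 */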
static int ti_clk_probe(struct udevice *dev)
{
	struct ti_clk_data *data = dev_get_priv(dev);
	struct clk *clk;
	const char *name;
	const struct clk_data *ti_clk_data;
	int i, j;
	const struct soc_attr *soc_match_data;
	const struct ti_k3_clk_platdata *pdata;

	debug("%s(dev=%p)\n", __func__, dev);

	soc_match_data = soc_device_match(ti_k3_soc_clk_data);
	if (!soc_match_data)
		return -ENODEV;

	pdata = (const struct ti_k3_clk_platdata *)soc_match_data->data;

	data->map = kcalloc(pdata->soc_dev_clk_data_cnt, sizeof(*data->map),
			    GFP_KERNEL);
	if (!data->map)
		return -ENOMEM;

	data->size = 0;

	for (i = 0; i < pdata->clk_list_cnt; i++) {
		ti_clk_data = &pdata->clk_list[i];

		switch (ti_clk_data->type) {
		case CLK_TYPE_FIXED_RATE:
			name = ti_clk_data->clk.fixed_rate.name;
			clk = clk_register_fixed_rate(NULL,
						      name,
						      ti_clk_data->clk.fixed_rate.rate);
			break;
		case CLK_TYPE_DIV:
			name = ti_clk_data->clk.div.name;
			clk = clk_register_divider(NULL, name,
						   ti_clk_data->clk.div.parent,
						   ti_clk_data->clk.div.flags,
						   map_physmem(ti_clk_data->clk.div.reg, 0, MAP_NOCACHE),
						   ti_clk_data->clk.div.shift,
						   ti_clk_data->clk.div.width,
						   ti_clk_data->clk.div.div_flags);
			break;
		case CLK_TYPE_MUX:
			name = ti_clk_data->clk.mux.name;
			clk = clk_register_mux(NULL, name,
					       ti_clk_data->clk.mux.parents,
					       ti_clk_data->clk.mux.num_parents,
					       ti_clk_data->clk.mux.flags,
					       map_physmem(ti_clk_data->clk.mux.reg, 0, MAP_NOCACHE),
					       ti_clk_data->clk.mux.shift,
					       ti_clk_data->clk.mux.width,
					       0);
			break;
		case CLK_TYPE_PLL:
			name = ti_clk_data->clk.pll.name;
			clk = clk_register_ti_pll(name,
						  ti_clk_data->clk.pll.parent,
						  map_physmem(ti_clk_data->clk.pll.reg, 0, MAP_NOCACHE));

			if (!osc_freq)
				osc_freq = clk_get_rate(clk_get_parent(clk));
			break;
		default:
			name = NULL;
			clk = NULL;
			printf("WARNING: %s has encountered unknown clk type %d\n",
			       __func__, ti_clk_data->type);
		}

		if (clk && ti_clk_data->default_freq)
			clk_set_rate(clk, ti_clk_data->default_freq);

		if (clk && name) {
			for (j = 0; j < pdata->soc_dev_clk_data_cnt; j++) {
				if (!strcmp(name, pdata->soc_dev_clk_data[j].clk_name)) {
					clk_add_map(data, clk, pdata->soc_dev_clk_data[j].dev_id,
						    pdata->soc_dev_clk_data[j].clk_id);
				}
			}
		}
	}

	return 0;
}

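/*
 * _clk_cmp() - compare a dev_id/clk_id pair against a map entry
 *
 * Returns 0 on a match, -1 if the map entry sorts after the requested pair,
 * and 1 if it sorts before it.
 */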
static int _clk_cmp(u32 dev_id, u32 clk_id, const struct clk_map *map)
{
	if (map->dev_id == dev_id && map->clk_id == clk_id)
		return 0;
	if (map->dev_id > dev_id ||
	    (map->dev_id == dev_id && map->clk_id > clk_id))
		return -1;
	return 1;
}

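/*
 * Despite its name, this is a simple linear scan over the clock map. It
 * returns the index of the matching entry, or -ENOENT if none matches.
 */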
static int bsearch(u32 dev_id, u32 clk_id, struct clk_map *map, int num)
{
	int result;
	int idx;

	for (idx = 0; idx < num; idx++) {
		result = _clk_cmp(dev_id, clk_id, &map[idx]);

		if (result == 0)
			return idx;
	}

	return -ENOENT;
}

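/*
 * ti_clk_of_xlate() - translate a two-cell device tree clock specifier
 * (device ID, clock ID) into the index of the matching map entry.
 */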
static int ti_clk_of_xlate(struct clk *clk,
			   struct ofnode_phandle_args *args)
{
	struct ti_clk_data *data = dev_get_priv(clk->dev);
	int idx;

	debug("%s(clk=%p, args_count=%d [0]=%d [1]=%d)\n", __func__, clk,
	      args->args_count, args->args[0], args->args[1]);

	if (args->args_count != 2) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (!data->size)
		return -EPROBE_DEFER;

	idx = bsearch(args->args[0], args->args[1], data->map, data->size);
	if (idx < 0)
		return idx;

	clk->id = idx;

	return 0;
}

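/* The operations below resolve clk->id to the mapped physical clock. */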
static ulong ti_clk_get_rate(struct clk *clk)
{
	struct ti_clk_data *data = dev_get_priv(clk->dev);
	struct clk *clkp = data->map[clk->id].clk;

	return clk_get_rate(clkp);
}

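/*
 * ti_clk_set_rate() - set the rate of the mapped physical clock
 *
 * If the mapped clock cannot set a rate itself, the request is propagated up
 * the parent chain, scaling the requested rate by the accumulated divider
 * ratio. If the best achievable rate is still far off, the PLL feeding the
 * chain is retuned before the divider is reprogrammed.
 */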
static ulong ti_clk_set_rate(struct clk *clk, ulong rate)
{
	struct ti_clk_data *data = dev_get_priv(clk->dev);
	struct clk *clkp = data->map[clk->id].clk;
	int div = 1;
	ulong child_rate;
	const struct clk_ops *ops;
	ulong new_rate, rem;
	ulong diff, new_diff;

	/*
	 * We must propagate the rate change to the parent if the current
	 * clock type does not allow setting it.
	 */
	while (clkp) {
		ops = clkp->dev->driver->ops;
		if (ops->set_rate)
			break;

		/*
		 * Store the child rate so we can calculate the clock rate
		 * that must be passed to the parent.
		 */
		child_rate = clk_get_rate(clkp);
		clkp = clk_get_parent(clkp);
		if (clkp) {
			debug("%s: propagating rate change to parent %s, rate=%u.\n",
			      __func__, clkp->dev->name, (u32)rate / div);
			div *= clk_get_rate(clkp) / child_rate;
		}
	}

	if (!clkp)
		return -ENOSYS;

	new_rate = clk_set_rate(clkp, rate / div);

	diff = abs(new_rate - rate / div);

	debug("%s: clk=%s, div=%d, rate=%u, new_rate=%u, diff=%u\n", __func__,
	      clkp->dev->name, div, (u32)rate, (u32)new_rate, (u32)diff);

	/*
	 * If the new rate differs by more than 50% from the target, modify
	 * the parent. This handles the typical case where an hsdiv directly
	 * follows a PLL.
	 */

	if (diff > rate / div / 2) {
		ulong pll_tgt;
		int pll_div = 0;

		/* Remember the clock being adjusted so it can be reprogrammed after the PLL */
		clk = clkp;

		debug("%s: propagating rate change to parent, rate=%u.\n",
		      __func__, (u32)rate / div);

		clkp = clk_get_parent(clkp);

		if (rate > osc_freq) {
			if (rate > PLL_MAX_FREQ / 2 && rate < PLL_MAX_FREQ) {
				pll_tgt = rate;
				pll_div = 1;
			} else {
				for (pll_div = 2; pll_div < PLL_MAX_DIV; pll_div++) {
					pll_tgt = rate / div * pll_div;
					if (pll_tgt >= PLL_MIN_FREQ && pll_tgt <= PLL_MAX_FREQ)
						break;
				}
			}
		} else {
			pll_tgt = osc_freq;
			pll_div = rate / div / osc_freq;
		}

		debug("%s: pll_tgt=%u, rate=%u, div=%u\n", __func__,
		      (u32)pll_tgt, (u32)rate, pll_div);

		clk_set_rate(clkp, pll_tgt);

		return clk_set_rate(clk, rate / div) * div;
	}

	/*
	 * If the new rate differs by more than 5% from the target, check for
	 * a rounding error in a divider by retrying the request with
	 * rate + (parent_freq % rate).
	 */

	if (diff > rate / div / 20) {
		u64 parent_freq = clk_get_parent_rate(clkp);

		rem = parent_freq % rate;
		new_rate = clk_set_rate(clkp, (rate / div) + rem);
		new_diff = abs(new_rate - rate / div);

		if (new_diff > diff) {
			new_rate = clk_set_rate(clkp, rate / div);
		} else {
			debug("%s: Using better rate %lu that gives diff %lu\n",
			      __func__, new_rate, new_diff);
		}
	}

	return new_rate;
}

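/* set_parent resolves both clock references through the map before forwarding. */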
static int ti_clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct ti_clk_data *data = dev_get_priv(clk->dev);
	struct clk *clkp = data->map[clk->id].clk;
	struct clk *parentp = data->map[parent->id].clk;

	return clk_set_parent(clkp, parentp);
}

static int ti_clk_enable(struct clk *clk)
{
	struct ti_clk_data *data = dev_get_priv(clk->dev);
	struct clk *clkp = data->map[clk->id].clk;

	return clk_enable(clkp);
}

static int ti_clk_disable(struct clk *clk)
{
	struct ti_clk_data *data = dev_get_priv(clk->dev);
	struct clk *clkp = data->map[clk->id].clk;

	return clk_disable(clkp);
}

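/* This driver binds against the same compatible as the TI SCI clock node. */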
static const struct udevice_id ti_clk_of_match[] = {
	{ .compatible = "ti,k2g-sci-clk" },
	{ /* sentinel */ },
};

static const struct clk_ops ti_clk_ops = {
	.of_xlate = ti_clk_of_xlate,
	.set_rate = ti_clk_set_rate,
	.get_rate = ti_clk_get_rate,
	.enable = ti_clk_enable,
	.disable = ti_clk_disable,
	.set_parent = ti_clk_set_parent,
};

U_BOOT_DRIVER(ti_clk) = {
	.name = "ti-clk",
	.id = UCLASS_CLK,
	.of_match = ti_clk_of_match,
	.probe = ti_clk_probe,
	.priv_auto = sizeof(struct ti_clk_data),
	.ops = &ti_clk_ops,
};