// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2019 Xilinx, Inc.
 * Siva Durga Prasad Paladugu <siva.durga.paladugu@xilinx.com>
 */

#include <common.h>
#include <asm/cache.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <malloc.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <asm/arch/sys_proto.h>
#include <zynqmp_firmware.h>
#include <linux/err.h>

#define MAX_PARENT			100
#define MAX_NODES			6
#define MAX_NAME_LEN			50

#define CLK_TYPE_SHIFT			2

#define PM_API_PAYLOAD_LEN		3

#define NA_PARENT			0xFFFFFFFF
#define DUMMY_PARENT			0xFFFFFFFE

#define CLK_TYPE_FIELD_LEN		4
#define CLK_TOPOLOGY_NODE_OFFSET	16
#define NODES_PER_RESP			3

#define CLK_TYPE_FIELD_MASK		0xF
#define CLK_FLAG_FIELD_MASK		GENMASK(21, 8)
#define CLK_TYPE_FLAG_FIELD_MASK	GENMASK(31, 24)
#define CLK_TYPE_FLAG2_FIELD_MASK	GENMASK(7, 4)
#define CLK_TYPE_FLAG_BITS		8

#define CLK_PARENTS_ID_LEN		16
#define CLK_PARENTS_ID_MASK		0xFFFF

#define END_OF_TOPOLOGY_NODE		1
#define END_OF_PARENTS			1

#define CLK_VALID_MASK			0x1
#define NODE_CLASS_SHIFT		26U
#define NODE_SUBCLASS_SHIFT		20U
#define NODE_TYPE_SHIFT			14U
#define NODE_INDEX_SHIFT		0U

#define CLK_GET_NAME_RESP_LEN		16
#define CLK_GET_TOPOLOGY_RESP_WORDS	3
#define CLK_GET_PARENTS_RESP_WORDS	3
#define CLK_GET_ATTR_RESP_WORDS		1

#define NODE_SUBCLASS_CLOCK_PLL		1
#define NODE_SUBCLASS_CLOCK_OUT		2
#define NODE_SUBCLASS_CLOCK_REF		3

#define NODE_CLASS_CLOCK		2
#define NODE_CLASS_MASK			0x3F

#define CLOCK_NODE_TYPE_MUX		1
#define CLOCK_NODE_TYPE_DIV		4
#define CLOCK_NODE_TYPE_GATE		6

enum pm_query_id {
	PM_QID_INVALID,
	PM_QID_CLOCK_GET_NAME,
	PM_QID_CLOCK_GET_TOPOLOGY,
	PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
	PM_QID_CLOCK_GET_PARENTS,
	PM_QID_CLOCK_GET_ATTRIBUTES,
	PM_QID_PINCTRL_GET_NUM_PINS,
	PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
	PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_FUNCTION_NAME,
	PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_PIN_GROUPS,
	PM_QID_CLOCK_GET_NUM_CLOCKS,
	PM_QID_CLOCK_GET_MAX_DIVISOR,
};

enum clk_type {
	CLK_TYPE_OUTPUT,
	CLK_TYPE_EXTERNAL,
};

struct clock_parent {
	char name[MAX_NAME_LEN];
	int id;
	u32 flag;
};

struct clock_topology {
	u32 type;
	u32 flag;
	u32 type_flag;
};

struct versal_clock {
	char clk_name[MAX_NAME_LEN];
	u32 valid;
	enum clk_type type;
	struct clock_topology node[MAX_NODES];
	u32 num_nodes;
	struct clock_parent parent[MAX_PARENT];
	u32 num_parents;
	u32 clk_id;
};

struct versal_clk_priv {
	struct versal_clock *clk;
};

static ulong alt_ref_clk;
static ulong pl_alt_ref_clk;
static ulong ref_clk;

struct versal_pm_query_data {
	u32 qid;
	u32 arg1;
	u32 arg2;
	u32 arg3;
};

static struct versal_clock *clock;
static unsigned int clock_max_idx;

#define PM_QUERY_DATA		35

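/*
 * Send a PM_QUERY_DATA request to the platform firmware through the SiP
 * service SMC interface and unpack the three 64-bit result registers into
 * up to five 32-bit payload words.
 */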
static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
{
	struct pt_regs regs;

	regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
	regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
	regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;

	smc_call(&regs);

	if (ret_payload) {
		ret_payload[0] = (u32)regs.regs[0];
		ret_payload[1] = upper_32_bits(regs.regs[0]);
		ret_payload[2] = (u32)regs.regs[1];
		ret_payload[3] = upper_32_bits(regs.regs[1]);
		ret_payload[4] = (u32)regs.regs[2];
	}

	return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
}

static inline int versal_is_valid_clock(u32 clk_id)
{
	if (clk_id >= clock_max_idx)
		return -ENODEV;

	return clock[clk_id].valid;
}

static int versal_get_clock_name(u32 clk_id, char *clk_name)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_get_clock_type(u32 clk_id, u32 *type)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		*type = clock[clk_id].type;
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_pm_clock_get_num_clocks(u32 *nclocks)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;

	ret = versal_pm_query(qdata, ret_payload);
	*nclocks = ret_payload[1];

	return ret;
}

static int versal_pm_clock_get_name(u32 clock_id, char *name)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NAME;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	if (ret)
		return ret;
	memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);

	return 0;
}

static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_PARENTS;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);

	return ret;
}

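/*
 * Decode one firmware topology response (PM_API_PAYLOAD_LEN words) and
 * append the node type, flags and type flags to @topology. Returns
 * END_OF_TOPOLOGY_NODE when a word with an empty type field terminates
 * the list.
 */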
static int __versal_clock_get_topology(struct clock_topology *topology,
				       u32 *data, u32 *nnodes)
{
	int i;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (!(data[i] & CLK_TYPE_FIELD_MASK))
			return END_OF_TOPOLOGY_NODE;
		topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
		topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
						   data[i]);
		topology[*nnodes].type_flag =
				FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
		topology[*nnodes].type_flag |=
			FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
			CLK_TYPE_FLAG_BITS;
		debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
		      topology[*nnodes].type, topology[*nnodes].flag,
		      topology[*nnodes].type_flag);
		(*nnodes)++;
	}

	return 0;
}

static int versal_clock_get_topology(u32 clk_id,
				     struct clock_topology *topology,
				     u32 *num_nodes)
{
	int j, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_nodes = 0;
	for (j = 0; j <= MAX_NODES; j += 3) {
		ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
						   pm_resp);
		if (ret)
			return ret;
		ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
		if (ret == END_OF_TOPOLOGY_NODE)
			return 0;
	}

	return 0;
}

static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
				      u32 *nparent)
{
	int i;
	struct clock_parent *parent;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (data[i] == NA_PARENT)
			return END_OF_PARENTS;

		parent = &parents[i];
		parent->id = data[i] & CLK_PARENTS_ID_MASK;
		if (data[i] == DUMMY_PARENT) {
			strcpy(parent->name, "dummy_name");
			parent->flag = 0;
		} else {
			parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
			if (versal_get_clock_name(parent->id, parent->name))
				continue;
		}
		debug("parent name:%s\n", parent->name);
		*nparent += 1;
	}

	return 0;
}

static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
				    u32 *num_parents)
{
	int j = 0, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_parents = 0;
	do {
		/* Get parents from firmware */
		ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
						  pm_resp);
		if (ret)
			return ret;

		ret = __versal_clock_get_parents(&parents[j], pm_resp,
						 num_parents);
		if (ret == END_OF_PARENTS)
			return 0;
		j += PM_API_PAYLOAD_LEN;
	} while (*num_parents <= MAX_PARENT);

	return 0;
}

static u32 versal_clock_get_div(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 div;

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	div = ret_payload[1];

	return div;
}

static u32 versal_clock_set_div(u32 clk_id, u32 div)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];

	xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);

	return div;
}

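/*
 * Return the reference rate feeding @clk_id: query the firmware for the
 * parent selection and map it to the ref_clk or pl_alt_ref_clk rate cached
 * at probe time.
 */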
static u64 versal_clock_ref(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ref;

	xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0, ret_payload);
	ref = ret_payload[0];
	if (!(ref & 1))
		return ref_clk;
	if (ref & 2)
		return pl_alt_ref_clk;
	return 0;
}

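/*
 * Compute a PLL output rate: bail out if the PLL is not enabled, otherwise
 * derive the rate from the parent reference clock, the FBDIV divider and
 * the fractional-mode parameter reported by the firmware.
 */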
static u64 versal_clock_get_pll_rate(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 fbdiv;
	u32 res;
	u32 frac;
	u64 freq;
	u32 parent_rate, parent_id;
	u32 id = clk_id & 0xFFF;

	xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
	res = ret_payload[1];
	if (!res) {
		printf("0x%x PLL not enabled\n", clk_id);
		return 0;
	}

	parent_id = clock[clock[id].parent[0].id].clk_id;
	parent_rate = versal_clock_ref(parent_id);

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	fbdiv = ret_payload[1];
	xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
	frac = ret_payload[1];

	freq = (fbdiv * parent_rate) >> (1 << frac);

	return freq;
}

static u32 versal_clock_mux(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
			return 1;

	return 0;
}

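/*
 * Return the clk_id of the active parent. Clocks with a mux node ask the
 * firmware which input is selected; all others use parent 0.
 */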
static u32 versal_clock_get_parentid(u32 clk_id)
{
	u32 parent_id = 0;
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 id = clk_id & 0xFFF;

	if (versal_clock_mux(clk_id)) {
		xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
				  ret_payload);
		parent_id = ret_payload[1];
	}

	debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
	return clock[clock[id].parent[parent_id].id].clk_id;
}

static u32 versal_clock_gate(u32 clk_id)
{
	u32 id = clk_id & 0xFFF;
	int i;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
			return 1;

	return 0;
}

static u32 versal_clock_div(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
			return 1;

	return 0;
}

static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_SUBCLASS_CLOCK_PLL &&
	    ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_CLASS_CLOCK) {
		*clk_rate = versal_clock_get_pll_rate(clk_id);
		return 1;
	}

	return 0;
}

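/*
 * Recursively calculate a clock rate: PLLs and reference clocks are
 * resolved directly, otherwise the parent rate is computed first and this
 * clock's divider (if any) is applied to it.
 */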
static u64 versal_clock_calc(u32 clk_id)
{
	u32 parent_id;
	u64 clk_rate;
	u32 div;

	if (versal_clock_pll(clk_id, &clk_rate))
		return clk_rate;

	parent_id = versal_clock_get_parentid(clk_id);
	if (((parent_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		return versal_clock_ref(clk_id);

	if (!parent_id)
		return 0;

	clk_rate = versal_clock_calc(parent_id);

	if (versal_clock_div(clk_id)) {
		div = versal_clock_get_div(clk_id);
		clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
	}

	return clk_rate;
}

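/*
 * Get the rate of a clock node. Reference clocks and PLLs are handled
 * directly; output clocks are only calculated when they carry a gate or
 * mux node, otherwise -EINVAL is returned.
 */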
static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		*clk_rate = versal_clock_ref(clk_id);

	if (versal_clock_pll(clk_id, clk_rate))
		return 0;

	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
	    ((clk_id >> NODE_CLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
		if (!versal_clock_gate(clk_id) && !versal_clock_mux(clk_id))
			return -EINVAL;
		*clk_rate = versal_clock_calc(clk_id);
		return 0;
	}

	return 0;
}

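/*
 * Back end for the 'clk dump' command: print the cached reference clock
 * rates and the rate of every valid output clock.
 */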
int soc_clk_dump(void)
{
	u64 clk_rate = 0;
	u32 type, ret, i = 0;

	printf("\n ****** VERSAL CLOCKS *****\n");

	printf("alt_ref_clk:%ld pl_alt_ref_clk:%ld ref_clk:%ld\n",
	       alt_ref_clk, pl_alt_ref_clk, ref_clk);
	for (i = 0; i < clock_max_idx; i++) {
		debug("%s\n", clock[i].clk_name);
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;

		ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);

		if (ret != -EINVAL)
			printf("clk: %s freq:%lld\n",
			       clock[i].clk_name, clk_rate);
	}

	return 0;
}

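/*
 * Populate the clock table: read each clock's attributes, compose its
 * clk_id from class/subclass/type/index, fetch its name, and query the
 * topology and parent list of every output clock.
 */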
static void versal_get_clock_info(void)
{
	int i, ret;
	u32 attr, type = 0, nodetype, subclass, class;

	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_pm_clock_get_attributes(i, &attr);
		if (ret)
			continue;

		clock[i].valid = attr & CLK_VALID_MASK;

		/* Skip queries for invalid clocks */
		ret = versal_is_valid_clock(i);
		if (ret != CLK_VALID_MASK)
			continue;

		clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
				CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
		nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
		subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
		class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;

		clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
				  (subclass << NODE_SUBCLASS_SHIFT) |
				  (nodetype << NODE_TYPE_SHIFT) |
				  (i << NODE_INDEX_SHIFT);

		ret = versal_pm_clock_get_name(clock[i].clk_id,
					       clock[i].clk_name);
		if (ret)
			continue;
		debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
		      clock[i].clk_name, clock[i].valid,
		      clock[i].type, clock[i].clk_id);
	}

	/* Get topology of all clocks */
	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;
		debug("clk name:%s\n", clock[i].clk_name);
		ret = versal_clock_get_topology(i, clock[i].node,
						&clock[i].num_nodes);
		if (ret)
			continue;

		ret = versal_clock_get_parents(i, clock[i].parent,
					       &clock[i].num_parents);
		if (ret)
			continue;
	}
}

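/*
 * Query the number of clocks from the firmware, allocate the clock table
 * and fill it in via versal_get_clock_info().
 */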
int versal_clock_setup(void)
{
	int ret;

	ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
	if (ret)
		return ret;

	debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
	clock = calloc(clock_max_idx, sizeof(*clock));
	if (!clock)
		return -ENOMEM;

	versal_get_clock_info();

	return 0;
}

static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
					 ulong *freq)
{
	struct clk clk;
	int ret;

	ret = clk_get_by_name(dev, name, &clk);
	if (ret < 0) {
		dev_err(dev, "failed to get %s\n", name);
		return ret;
	}

	*freq = clk_get_rate(&clk);
	if (IS_ERR_VALUE(*freq)) {
		dev_err(dev, "failed to get rate %s\n", name);
		return -EINVAL;
	}

	return 0;
}

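/*
 * Probe: cache the alt_ref_clk, pl_alt_ref_clk and ref_clk input rates
 * from the device tree and build the firmware clock table.
 */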
static int versal_clk_probe(struct udevice *dev)
{
	int ret;
	struct versal_clk_priv *priv = dev_get_priv(dev);

	debug("%s\n", __func__);

	ret = versal_clock_get_freq_by_name("alt_ref_clk", dev, &alt_ref_clk);
	if (ret < 0)
		return -EINVAL;

	ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
					    dev, &pl_alt_ref_clk);
	if (ret < 0)
		return -EINVAL;

	ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
	if (ret < 0)
		return -EINVAL;

	versal_clock_setup();

	priv->clk = clock;

	return ret;
}

static ulong versal_clk_get_rate(struct clk *clk)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	versal_clock_get_rate(clk_id, &clk_rate);

	return clk_rate;
}

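/*
 * Set a clock rate by reprogramming the nearest divider: walk up the
 * parent chain until a clock with a divider node is found, scale that
 * divider to approximate the requested rate and return the resulting rate.
 */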
static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;
	u32 div;
	int ret;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	ret = versal_clock_get_rate(clk_id, &clk_rate);
	if (ret) {
		printf("Clock is not a Gate:0x%x\n", clk_id);
		return 0;
	}

	do {
		if (versal_clock_div(clk_id)) {
			div = versal_clock_get_div(clk_id);
			clk_rate *= div;
			div = DIV_ROUND_CLOSEST(clk_rate, rate);
			versal_clock_set_div(clk_id, div);
			debug("%s, div:%d, newrate:%lld\n", __func__,
			      div, DIV_ROUND_CLOSEST(clk_rate, div));
			return DIV_ROUND_CLOSEST(clk_rate, div);
		}
		clk_id = versal_clock_get_parentid(clk_id);
	} while (((clk_id >> NODE_SUBCLASS_SHIFT) &
		 NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);

	printf("Clock has no divisors:0x%x\n", priv->clk[id].clk_id);

	return clk_rate;
}

static struct clk_ops versal_clk_ops = {
	.set_rate = versal_clk_set_rate,
	.get_rate = versal_clk_get_rate,
};

static const struct udevice_id versal_clk_ids[] = {
	{ .compatible = "xlnx,versal-clk" },
	{ }
};

U_BOOT_DRIVER(versal_clk) = {
	.name = "versal-clk",
	.id = UCLASS_CLK,
	.of_match = versal_clk_ids,
	.probe = versal_clk_probe,
	.ops = &versal_clk_ops,
	.priv_auto_alloc_size = sizeof(struct versal_clk_priv),
};