// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2021 SiFive, Inc.
 * Wesley Terpstra
 * Paul Walmsley
 * Zong Li
 * Pragnesh Patel
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * The PRCI implements clock and reset control for the SiFive chip.
 * This driver assumes that it has sole control over all PRCI resources.
 *
 * This driver is based on the PRCI driver written by Wesley Terpstra:
 * https://github.com/riscv/riscv-linux/commit/999529edf517ed75b56659d456d221b2ee56bb60
 */

#include <common.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <reset.h>
#include <asm/io.h>
#include <asm/arch/reset.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <dt-bindings/clock/sifive-fu740-prci.h>

#include "fu540-prci.h"
#include "fu740-prci.h"

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl(pd->va + offs);
}

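/**
 * __prci_writel() - write to a PRCI register
 * @v: value to write
 * @offs: register offset to write to (in bytes, from PRCI base address)
 * @pd: PRCI context
 *
 * Write @v to the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd.
 *
 * Context: Any context.
 */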
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel(v, pd->va + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from a PRCI PLL configuration register, split it
 * into fields and use them to populate the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
	u32 v;

	v = r & PRCI_COREPLLCFG0_DIVR_MASK;
	v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
	c->divr = v;

	v = r & PRCI_COREPLLCFG0_DIVF_MASK;
	v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
	c->divf = v;

	v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
	v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
	c->divq = v;

	v = r & PRCI_COREPLLCFG0_RANGE_MASK;
	v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
	c->range = v;

	c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
		     WRPLL_FLAGS_EXT_FEEDBACK_MASK);

	/* external feedback mode not supported */
	c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context.  Caller must ensure that the contents of the
 *	    record pointed to by @c do not change during the execution
 *	    of this function.
 *
 * Return: a value suitable for writing into a PRCI PLL configuration
 *	   register
 */
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
	u32 r = 0;

	r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
	r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
	r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
	r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;

	/* external feedback mode not supported */
	r |= PRCI_COREPLLCFG0_FSE_MASK;

	return r;
}

/**
 * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *	    @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd.  Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *	    @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

	memcpy(&pwd->c, c, sizeof(*c));
}

/**
 * __prci_wrpll_write_cfg1() - write clock enable/disable configuration
 * into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @enable: clock enable or disable value
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}

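/**
 * sifive_prci_wrpll_recalc_rate() - calculate the WRPLL output rate
 * @pc: PRCI clock whose rate should be calculated
 * @parent_rate: rate of the PLL's input (reference) clock, in Hz
 *
 * Compute the output rate of the WRPLL backing @pc from its cached
 * configuration and @parent_rate.
 *
 * Return: the PLL output rate in Hz.
 */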
unsigned long sifive_prci_wrpll_recalc_rate(struct __prci_clock *pc,
					    unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}

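/**
 * sifive_prci_wrpll_round_rate() - round a requested rate to an achievable one
 * @pc: PRCI clock whose rate should be rounded
 * @rate: requested output rate, in Hz
 * @parent_rate: pointer to the rate of the PLL's input clock, in Hz
 *
 * Compute the WRPLL configuration that comes closest to @rate, without
 * modifying the cached configuration, and return the rate that this
 * configuration would actually produce.
 *
 * Return: the rounded output rate in Hz.
 */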
unsigned long sifive_prci_wrpll_round_rate(struct __prci_clock *pc,
					   unsigned long rate,
					   unsigned long *parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct wrpll_cfg c;

	memcpy(&c, &pwd->c, sizeof(c));

	wrpll_configure_for_rate(&c, rate, *parent_rate);

	return wrpll_calc_output_rate(&c, *parent_rate);
}

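/**
 * sifive_prci_wrpll_set_rate() - program a WRPLL for a target rate
 * @pc: PRCI clock to configure
 * @rate: requested output rate, in Hz
 * @parent_rate: rate of the PLL's input clock, in Hz
 *
 * Compute a WRPLL configuration for @rate, bypass the PLL while it is
 * reprogrammed (if a bypass callback is provided), write the new
 * configuration, and delay for the worst-case lock time.
 *
 * Return: 0 upon success or a negative error code upon failure.
 */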
int sifive_prci_wrpll_set_rate(struct __prci_clock *pc,
			       unsigned long rate,
			       unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	udelay(wrpll_calc_max_lock_us(&pwd->c));

	return 0;
}

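/**
 * sifive_prci_clock_enable() - enable or disable a PLL-backed PRCI clock
 * @pc: PRCI clock to enable or disable
 * @enable: true to enable the clock output, false to disable it
 *
 * When enabling, set the clock-enable bit in the PLL's CFG1 register,
 * then switch consumers off the bypass path and release any downstream
 * reset via the optional disable_bypass/release_reset callbacks.  When
 * disabling, bypass the PLL first and then clear the clock-enable bit.
 *
 * Return: 0 on success.
 */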
int sifive_prci_clock_enable(struct __prci_clock *pc, bool enable)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (enable) {
		__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

		if (pwd->disable_bypass)
			pwd->disable_bypass(pd);

		if (pwd->release_reset)
			pwd->release_reset(pd);
	} else {
		u32 r;

		if (pwd->enable_bypass)
			pwd->enable_bypass(pd);

		r = __prci_readl(pd, pwd->cfg1_offs);
		r &= ~PRCI_COREPLLCFG1_CKE_MASK;

		__prci_wrpll_write_cfg1(pd, pwd, r);
	}

	return 0;
}

/* TLCLKSEL clock integration */

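/**
 * sifive_prci_tlclksel_recalc_rate() - calculate the TileLink clock rate
 * @pc: PRCI clock whose rate should be calculated
 * @parent_rate: rate of the parent clock, in Hz
 *
 * TLCLK is the parent clock divided by 1 or 2, depending on the
 * TLCLKSEL status bit in the CLKMUXSTATUS register.
 *
 * Return: the TLCLK rate in Hz.
 */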
unsigned long sifive_prci_tlclksel_recalc_rate(struct __prci_clock *pc,
					       unsigned long parent_rate)
{
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}

/* HFPCLK clock integration */

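/**
 * sifive_prci_hfpclkplldiv_recalc_rate() - calculate the HFPCLK divider output
 * @pc: PRCI clock whose rate should be calculated
 * @parent_rate: rate of the parent clock, in Hz
 *
 * The periphery clock is the HFPCLK PLL output divided by the value in
 * the HFPCLKPLLDIV register plus 2.
 *
 * Return: the divided clock rate in Hz.
 */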
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct __prci_clock *pc,
						   unsigned long parent_rate)
{
	struct __prci_data *pd = pc->pd;
	u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);

	return div_u64(parent_rate, div + 2);
}

/**
 * sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
 * FINAL_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the final COREPLL output clock; return once
 * complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
 * output DVFS_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
 * output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
 * output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
 * output HFPCLKPLL
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

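/**
 * __prci_consumer_reset() - assert or deassert a named PRCI reset signal
 * @rst_name: name of the reset signal, as exposed by the SiFive reset driver
 * @trigger: true to deassert (release) the reset, false to assert it
 *
 * Look up the SiFive reset driver, fetch the reset control named
 * @rst_name, and assert or deassert it.
 *
 * Return: 0 on success or a negative error code on failure.
 */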
static int __prci_consumer_reset(const char *rst_name, bool trigger)
{
	struct udevice *dev;
	struct reset_ctl rst_sig;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_RESET,
					  DM_DRIVER_GET(sifive_reset),
					  &dev);
	if (ret) {
		dev_err(dev, "Reset driver not found: %d\n", ret);
		return ret;
	}

	ret = reset_get_by_name(dev, rst_name, &rst_sig);
	if (ret) {
		dev_err(dev, "failed to get %s reset\n", rst_name);
		return ret;
	}

	if (reset_valid(&rst_sig)) {
		if (trigger)
			ret = reset_deassert(&rst_sig);
		else
			ret = reset_assert(&rst_sig);
		if (ret) {
			dev_err(dev, "failed to trigger reset id = %ld\n",
				rst_sig.id);
			return ret;
		}
	}

	return ret;
}

/**
 * sifive_prci_ddr_release_reset() - Release DDR reset
 * @pd: struct __prci_data * for the PRCI containing the DDRCLK mux reg
 */
void sifive_prci_ddr_release_reset(struct __prci_data *pd)
{
	/* Release DDR ctrl reset */
	__prci_consumer_reset("ddr_ctrl", true);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");

	/* Release DDR AXI reset */
	__prci_consumer_reset("ddr_axi", true);

	/* Release DDR AHB reset */
	__prci_consumer_reset("ddr_ahb", true);

	/* Release DDR PHY reset */
	__prci_consumer_reset("ddr_phy", true);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");

	/*
	 * These resets take roughly 16 cycles to propagate, so don't
	 * issue any transactions to the DDR blocks until they are out
	 * of reset; just wait here.
	 */
	for (int i = 0; i < 256; i++)
		asm volatile ("nop");
}

/**
 * sifive_prci_ethernet_release_reset() - Release ethernet reset
 * @pd: struct __prci_data * for the PRCI containing the Ethernet CLK mux reg
 */
void sifive_prci_ethernet_release_reset(struct __prci_data *pd)
{
	/* Release GEMGXL reset */
	__prci_consumer_reset("gemgxl_reset", true);

	/* Procmon => core clock */
	__prci_writel(PRCI_PROCMONCFG_CORE_CLOCK_MASK, PRCI_PROCMONCFG_OFFSET,
		      pd);

	/* Release Chiplink reset */
	__prci_consumer_reset("cltx_reset", true);
}

/**
 * sifive_prci_cltx_release_reset() - Release cltx reset
 * @pd: struct __prci_data * for the PRCI containing the Chiplink TX clock reg
 */
void sifive_prci_cltx_release_reset(struct __prci_data *pd)
{
	/* Release CLTX reset */
	__prci_consumer_reset("cltx_reset", true);
}

/* Core clock mux control */

/**
 * sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the PLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

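/**
 * sifive_prci_parent_rate() - determine the rate of a clock's parent
 * @pc: PRCI clock whose parent rate is wanted
 * @data: per-SoC clock description table
 *
 * If the parent is the COREPLL or HFPCLKPLL, recurse and recalculate its
 * rate from that PLL's own parent; otherwise return the rate of the
 * external hfclk or rtcclk reference obtained from the device tree.
 *
 * Return: the parent clock rate in Hz, or -ENXIO if the parent cannot
 *	   be resolved.
 */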
static ulong sifive_prci_parent_rate(struct __prci_clock *pc,
				     struct prci_clk_desc *data)
{
	ulong parent_rate;
	ulong i;
	struct __prci_clock *p;

	if (strcmp(pc->parent_name, "corepll") == 0 ||
	    strcmp(pc->parent_name, "hfpclkpll") == 0) {
		for (i = 0; i < data->num_clks; i++) {
			if (strcmp(pc->parent_name, data->clks[i].name) == 0)
				break;
		}

		if (i >= data->num_clks)
			return -ENXIO;

		p = &data->clks[i];
		if (!p->pd || !p->ops->recalc_rate)
			return -ENXIO;

		return p->ops->recalc_rate(p, sifive_prci_parent_rate(p, data));
	}

	if (strcmp(pc->parent_name, "rtcclk") == 0)
		parent_rate = clk_get_rate(&pc->pd->parent_rtcclk);
	else
		parent_rate = clk_get_rate(&pc->pd->parent_hfclk);

	return parent_rate;
}

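/* clk uclass operations */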
static ulong sifive_prci_get_rate(struct clk *clk)
{
	struct __prci_clock *pc;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd || !pc->ops->recalc_rate)
		return -ENXIO;

	return pc->ops->recalc_rate(pc, sifive_prci_parent_rate(pc, data));
}

static ulong sifive_prci_set_rate(struct clk *clk, ulong rate)
{
	int err;
	struct __prci_clock *pc;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd || !pc->ops->set_rate)
		return -ENXIO;

	err = pc->ops->set_rate(pc, rate, sifive_prci_parent_rate(pc, data));
	if (err)
		return err;

	return rate;
}

static int sifive_prci_enable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 1);

	return ret;
}

static int sifive_prci_disable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 0);

	return ret;
}

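/**
 * sifive_prci_probe() - initialize the PRCI driver instance
 * @dev: PRCI device
 *
 * Map the PRCI register block, look up the hfclk and rtcclk parent
 * clocks, and cache the current WRPLL settings for each PLL-backed
 * clock.  When built for SPL on the FU740, additionally set the HFPCLK
 * or CLTX PLL (whichever the PRCIPLL register reports) to 260 MHz and
 * enable it, since that PLL drives ChipLink on those designs.
 *
 * Return: 0 on success or a negative error code otherwise.
 */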
static int sifive_prci_probe(struct udevice *dev)
{
	int i, err;
	struct __prci_clock *pc;
	struct __prci_data *pd = dev_get_priv(dev);

	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(dev);

	pd->va = dev_read_addr_ptr(dev);
	if (!pd->va)
		return -EINVAL;

	err = clk_get_by_index(dev, 0, &pd->parent_hfclk);
	if (err)
		return err;

	err = clk_get_by_index(dev, 1, &pd->parent_rtcclk);
	if (err)
		return err;

	for (i = 0; i < data->num_clks; ++i) {
		pc = &data->clks[i];
		pc->pd = pd;
		if (pc->pwd)
			__prci_wrpll_read_cfg0(pd, pc->pwd);
	}

	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
		if (device_is_compatible(dev, "sifive,fu740-c000-prci")) {
			u32 prci_pll_reg;
			unsigned long parent_rate;

			prci_pll_reg = readl(pd->va + PRCI_PRCIPLL_OFFSET);

			if (prci_pll_reg & PRCI_PRCIPLL_HFPCLKPLL) {
				/*
				 * Only initialize the HFPCLK PLL.  In this
				 * case the design uses hfpclk to drive
				 * Chiplink.
				 */
				pc = &data->clks[FU740_PRCI_CLK_HFPCLKPLL];
				parent_rate = sifive_prci_parent_rate(pc, data);
				sifive_prci_wrpll_set_rate(pc, 260000000,
							   parent_rate);
				pc->ops->enable_clk(pc, 1);
			} else if (prci_pll_reg & PRCI_PRCIPLL_CLTXPLL) {
				/* CLTX pll init */
				pc = &data->clks[FU740_PRCI_CLK_CLTXPLL];
				parent_rate = sifive_prci_parent_rate(pc, data);
				sifive_prci_wrpll_set_rate(pc, 260000000,
							   parent_rate);
				pc->ops->enable_clk(pc, 1);
			}
		}
	}

	return 0;
}

706
707static struct clk_ops sifive_prci_ops = {
708 .set_rate = sifive_prci_set_rate,
709 .get_rate = sifive_prci_get_rate,
710 .enable = sifive_prci_enable,
711 .disable = sifive_prci_disable,
712};
713
714static int sifive_clk_bind(struct udevice *dev)
715{
716 return sifive_reset_bind(dev, PRCI_DEVICERESETCNT);
717}
718
719static const struct udevice_id sifive_prci_ids[] = {
720 { .compatible = "sifive,fu540-c000-prci", .data = (ulong)&prci_clk_fu540 },
721 { .compatible = "sifive,fu740-c000-prci", .data = (ulong)&prci_clk_fu740 },
722 { }
723};
724
725U_BOOT_DRIVER(sifive_prci) = {
726 .name = "sifive-prci",
727 .id = UCLASS_CLK,
728 .of_match = sifive_prci_ids,
729 .probe = sifive_prci_probe,
730 .ops = &sifive_prci_ops,
731 .priv_auto = sizeof(struct __prci_data),
732 .bind = sifive_clk_bind,
733};
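
/*
 * Consumer usage sketch (illustrative only, not part of this driver):
 * a peripheral driver whose device tree node references one of these
 * PRCI clocks would typically go through the generic U-Boot clk API,
 * which routes the calls into the clk_ops above, e.g.:
 *
 *	struct clk clk;
 *	int ret;
 *
 *	ret = clk_get_by_index(dev, 0, &clk);
 *	if (ret)
 *		return ret;
 *
 *	clk_set_rate(&clk, 125000000);	// dispatched to sifive_prci_set_rate()
 *	ret = clk_enable(&clk);		// dispatched to sifive_prci_enable()
 *
 * The clock index and the 125 MHz rate above are hypothetical values
 * chosen for illustration; real consumers take both from their device
 * tree bindings.
 */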