// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2015 Google, Inc
 * (C) Copyright 2016 Heiko Stuebner <heiko@sntech.de>
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <mapmem.h>
#include <syscon.h>
#include <asm/io.h>
#include <asm/arch-rockchip/clock.h>
#include <asm/arch-rockchip/cru_rk3188.h>
#include <asm/arch-rockchip/grf_rk3188.h>
#include <asm/arch-rockchip/hardware.h>
#include <dt-bindings/clock/rk3188-cru.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass-internal.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/stringify.h>

enum rk3188_clk_type {
	RK3188_CRU,
	RK3188A_CRU,
};

struct rk3188_clk_plat {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_rockchip_rk3188_cru dtd;
#endif
};

struct pll_div {
	u32 nr;
	u32 nf;
	u32 no;
};

enum {
	VCO_MAX_HZ	= 2200U * 1000000,
	VCO_MIN_HZ	= 440 * 1000000,
	OUTPUT_MAX_HZ	= 2200U * 1000000,
	OUTPUT_MIN_HZ	= 30 * 1000000,
	FREF_MAX_HZ	= 2200U * 1000000,
	FREF_MIN_HZ	= 30 * 1000,
};

enum {
	/* PLL CON0 */
	PLL_OD_MASK	= 0x0f,

	/* PLL CON1 */
	PLL_NF_MASK	= 0x1fff,

	/* PLL CON2 */
	PLL_BWADJ_MASK	= 0x0fff,

	/* PLL CON3 */
	PLL_RESET_SHIFT	= 5,

	/* GRF_SOC_STATUS0 */
	SOCSTS_DPLL_LOCK	= 1 << 5,
	SOCSTS_APLL_LOCK	= 1 << 6,
	SOCSTS_CPLL_LOCK	= 1 << 7,
	SOCSTS_GPLL_LOCK	= 1 << 8,
};

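/*
 * The CRU divider fields hold the divider value minus one, so a raw
 * register field of 'div' corresponds to a rate of input_rate / (div + 1).
 */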
#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))

#define PLL_DIVISORS(hz, _nr, _no) {\
	.nr = _nr, .nf = (u32)((u64)hz * _nr * _no / OSC_HZ), .no = _no};\
	_Static_assert(((u64)hz * _nr * _no / OSC_HZ) * OSC_HZ /\
		       (_nr * _no) == hz, #hz "Hz cannot be hit with PLL "\
		       "divisors on line " __stringify(__LINE__));

/* Keep divisors as low as possible to reduce jitter and power usage */
#ifdef CONFIG_SPL_BUILD
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 2, 2);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 2);
#endif

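/*
 * Program one PLL with the given dividers: the output rate is
 * OSC_HZ * nf / (nr * no).  The PLL is held in reset while NR/OD and NF
 * (and, on rk3188a parts, the bandwidth-adjust value) are written, then
 * released; callers switch the PLL to slow mode first and poll the GRF
 * lock bit afterwards.
 */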
static int rkclk_set_pll(struct rk3188_cru *cru, enum rk_clk_id clk_id,
			 const struct pll_div *div, bool has_bwadj)
{
	int pll_id = rk_pll_id(clk_id);
	struct rk3188_pll *pll = &cru->pll[pll_id];
	/* All PLLs share the same VCO and output frequency range limits. */
	uint vco_hz = OSC_HZ / 1000 * div->nf / div->nr * 1000;
	uint output_hz = vco_hz / div->no;

	debug("PLL at %x: nf=%d, nr=%d, no=%d, vco=%u Hz, output=%u Hz\n",
	      (uint)pll, div->nf, div->nr, div->no, vco_hz, output_hz);
	assert(vco_hz >= VCO_MIN_HZ && vco_hz <= VCO_MAX_HZ &&
	       output_hz >= OUTPUT_MIN_HZ && output_hz <= OUTPUT_MAX_HZ &&
	       (div->no == 1 || !(div->no % 2)));

	/* enter reset */
	rk_setreg(&pll->con3, 1 << PLL_RESET_SHIFT);

	rk_clrsetreg(&pll->con0,
		     CLKR_MASK << CLKR_SHIFT | PLL_OD_MASK,
		     ((div->nr - 1) << CLKR_SHIFT) | (div->no - 1));
	rk_clrsetreg(&pll->con1, CLKF_MASK, div->nf - 1);

	if (has_bwadj)
		rk_clrsetreg(&pll->con2, PLL_BWADJ_MASK, (div->nf >> 1) - 1);

	udelay(10);

	/* return from reset */
	rk_clrreg(&pll->con3, 1 << PLL_RESET_SHIFT);

	return 0;
}

static int rkclk_configure_ddr(struct rk3188_cru *cru, struct rk3188_grf *grf,
			       unsigned int hz, bool has_bwadj)
{
	static const struct pll_div dpll_cfg[] = {
		{.nf = 75, .nr = 1, .no = 6},
		{.nf = 400, .nr = 9, .no = 2},
		{.nf = 500, .nr = 9, .no = 2},
		{.nf = 100, .nr = 3, .no = 1},
	};
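	/*
	 * With the 24 MHz oscillator these entries give
	 * 24 * nf / (nr * no) = 300, 533.3, 666.7 and 800 MHz respectively,
	 * matching the cases below.
	 */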
	int cfg;

	switch (hz) {
	case 300000000:
		cfg = 0;
		break;
	case 533000000:	/* actually 533.3 MHz */
		cfg = 1;
		break;
	case 666000000:	/* actually 666.7 MHz */
		cfg = 2;
		break;
	case 800000000:
		cfg = 3;
		break;
	default:
		debug("Unsupported SDRAM frequency");
		return -EINVAL;
	}

	/* pll enter slow-mode */
	rk_clrsetreg(&cru->cru_mode_con, DPLL_MODE_MASK << DPLL_MODE_SHIFT,
		     DPLL_MODE_SLOW << DPLL_MODE_SHIFT);

	rkclk_set_pll(cru, CLK_DDR, &dpll_cfg[cfg], has_bwadj);

	/* wait for pll lock */
	while (!(readl(&grf->soc_status0) & SOCSTS_DPLL_LOCK))
		udelay(1);

	/* PLL enter normal-mode */
	rk_clrsetreg(&cru->cru_mode_con, DPLL_MODE_MASK << DPLL_MODE_SHIFT,
		     DPLL_MODE_NORMAL << DPLL_MODE_SHIFT);

	return 0;
}

static int rkclk_configure_cpu(struct rk3188_cru *cru, struct rk3188_grf *grf,
			       unsigned int hz, bool has_bwadj)
{
	static const struct pll_div apll_cfg[] = {
		{.nf = 50, .nr = 1, .no = 2},
		{.nf = 67, .nr = 1, .no = 1},
	};
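	/* 24 * 50 / (1 * 2) = 600 MHz and 24 * 67 / (1 * 1) = 1608 MHz */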
	int div_core_peri, div_aclk_core, cfg;

	/*
	 * We support two possible frequencies: the safe 600MHz, which
	 * works with the default pmic settings and is set in SPL to get
	 * away from the 24MHz boot default, and the maximum of 1.6GHz,
	 * which boards can select once they have pmic support for it.
	 */
	switch (hz) {
	case APLL_SAFE_HZ:
		cfg = 0;
		div_core_peri = 1;
		div_aclk_core = 3;
		break;
	case APLL_HZ:
		cfg = 1;
		div_core_peri = 2;
		div_aclk_core = 3;
		break;
	default:
		debug("Unsupported ARMCLK frequency");
		return -EINVAL;
	}

	/* pll enter slow-mode */
	rk_clrsetreg(&cru->cru_mode_con, APLL_MODE_MASK << APLL_MODE_SHIFT,
		     APLL_MODE_SLOW << APLL_MODE_SHIFT);

	rkclk_set_pll(cru, CLK_ARM, &apll_cfg[cfg], has_bwadj);

	/* waiting for pll lock */
	while (!(readl(&grf->soc_status0) & SOCSTS_APLL_LOCK))
		udelay(1);

	/* Set divider for peripherals attached to the cpu core. */
	rk_clrsetreg(&cru->cru_clksel_con[0],
		     CORE_PERI_DIV_MASK << CORE_PERI_DIV_SHIFT,
		     div_core_peri << CORE_PERI_DIV_SHIFT);

	/* set up dependent divisor for aclk_core */
	rk_clrsetreg(&cru->cru_clksel_con[1],
		     CORE_ACLK_DIV_MASK << CORE_ACLK_DIV_SHIFT,
		     div_aclk_core << CORE_ACLK_DIV_SHIFT);

	/* PLL enter normal-mode */
	rk_clrsetreg(&cru->cru_mode_con, APLL_MODE_MASK << APLL_MODE_SHIFT,
		     APLL_MODE_NORMAL << APLL_MODE_SHIFT);

	return hz;
}

/* Get pll rate by id */
static uint32_t rkclk_pll_get_rate(struct rk3188_cru *cru,
				   enum rk_clk_id clk_id)
{
	uint32_t nr, no, nf;
	uint32_t con;
	int pll_id = rk_pll_id(clk_id);
	struct rk3188_pll *pll = &cru->pll[pll_id];
	static u8 clk_shift[CLK_COUNT] = {
		0xff, APLL_MODE_SHIFT, DPLL_MODE_SHIFT, CPLL_MODE_SHIFT,
		GPLL_MODE_SHIFT
	};
	uint shift;

	con = readl(&cru->cru_mode_con);
	shift = clk_shift[clk_id];
	switch ((con >> shift) & APLL_MODE_MASK) {
	case APLL_MODE_SLOW:
		return OSC_HZ;
	case APLL_MODE_NORMAL:
		/* normal mode */
		con = readl(&pll->con0);
		no = ((con >> CLKOD_SHIFT) & CLKOD_MASK) + 1;
		nr = ((con >> CLKR_SHIFT) & CLKR_MASK) + 1;
		con = readl(&pll->con1);
		nf = ((con >> CLKF_SHIFT) & CLKF_MASK) + 1;

		return (24 * nf / (nr * no)) * 1000000;
	case APLL_MODE_DEEP:
	default:
		return 32768;
	}
}

static ulong rockchip_mmc_get_clk(struct rk3188_cru *cru, uint gclk_rate,
				  int periph)
{
	uint div;
	u32 con;

	switch (periph) {
	case HCLK_EMMC:
	case SCLK_EMMC:
		con = readl(&cru->cru_clksel_con[12]);
		div = (con >> EMMC_DIV_SHIFT) & EMMC_DIV_MASK;
		break;
	case HCLK_SDMMC:
	case SCLK_SDMMC:
		con = readl(&cru->cru_clksel_con[11]);
		div = (con >> MMC0_DIV_SHIFT) & MMC0_DIV_MASK;
		break;
	case HCLK_SDIO:
	case SCLK_SDIO:
		con = readl(&cru->cru_clksel_con[12]);
		div = (con >> SDIO_DIV_SHIFT) & SDIO_DIV_MASK;
		break;
	default:
		return -EINVAL;
	}

	return DIV_TO_RATE(gclk_rate, div) / 2;
}

static ulong rockchip_mmc_set_clk(struct rk3188_cru *cru, uint gclk_rate,
				  int periph, uint freq)
{
	int src_clk_div;

	debug("%s: gclk_rate=%u\n", __func__, gclk_rate);
	/* the mmc clock is divided by 2 internally, so double it in the cru */
	src_clk_div = DIV_ROUND_UP(gclk_rate / 2, freq) - 1;
	assert(src_clk_div <= 0x3f);
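	/*
	 * Example (assuming PERI_HCLK_HZ is 148.5 MHz): the 16 MHz request
	 * from rkclk_init() gives DIV_ROUND_UP(74250000, 16000000) - 1 = 4,
	 * i.e. 148.5 MHz / 5 / 2 = 14.85 MHz at the card.
	 */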

	switch (periph) {
	case HCLK_EMMC:
	case SCLK_EMMC:
		rk_clrsetreg(&cru->cru_clksel_con[12],
			     EMMC_DIV_MASK << EMMC_DIV_SHIFT,
			     src_clk_div << EMMC_DIV_SHIFT);
		break;
	case HCLK_SDMMC:
	case SCLK_SDMMC:
		rk_clrsetreg(&cru->cru_clksel_con[11],
			     MMC0_DIV_MASK << MMC0_DIV_SHIFT,
			     src_clk_div << MMC0_DIV_SHIFT);
		break;
	case HCLK_SDIO:
	case SCLK_SDIO:
		rk_clrsetreg(&cru->cru_clksel_con[12],
			     SDIO_DIV_MASK << SDIO_DIV_SHIFT,
			     src_clk_div << SDIO_DIV_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	return rockchip_mmc_get_clk(cru, gclk_rate, periph);
}

static ulong rockchip_spi_get_clk(struct rk3188_cru *cru, uint gclk_rate,
				  int periph)
{
	uint div;
	u32 con;

	switch (periph) {
	case SCLK_SPI0:
		con = readl(&cru->cru_clksel_con[25]);
		div = (con >> SPI0_DIV_SHIFT) & SPI0_DIV_MASK;
		break;
	case SCLK_SPI1:
		con = readl(&cru->cru_clksel_con[25]);
		div = (con >> SPI1_DIV_SHIFT) & SPI1_DIV_MASK;
		break;
	default:
		return -EINVAL;
	}

	return DIV_TO_RATE(gclk_rate, div);
}

static ulong rockchip_spi_set_clk(struct rk3188_cru *cru, uint gclk_rate,
				  int periph, uint freq)
{
	int src_clk_div = DIV_ROUND_UP(gclk_rate, freq) - 1;

	assert(src_clk_div < 128);
	switch (periph) {
	case SCLK_SPI0:
		assert(src_clk_div <= SPI0_DIV_MASK);
		rk_clrsetreg(&cru->cru_clksel_con[25],
			     SPI0_DIV_MASK << SPI0_DIV_SHIFT,
			     src_clk_div << SPI0_DIV_SHIFT);
		break;
	case SCLK_SPI1:
		assert(src_clk_div <= SPI1_DIV_MASK);
		rk_clrsetreg(&cru->cru_clksel_con[25],
			     SPI1_DIV_MASK << SPI1_DIV_SHIFT,
			     src_clk_div << SPI1_DIV_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	return rockchip_spi_get_clk(cru, gclk_rate, periph);
}

#ifdef CONFIG_SPL_BUILD
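/*
 * One-time SPL clock setup: bring GPLL and CPLL up (via slow mode, with a
 * wait for the GRF lock bits), reparent the cpu and peri clock trees from
 * the boot-time APLL source to GPLL with suitable ACLK/HCLK/PCLK dividers,
 * and give SDMMC a 16 MHz starting clock.
 */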
static void rkclk_init(struct rk3188_cru *cru, struct rk3188_grf *grf,
		       bool has_bwadj)
{
	u32 aclk_div, hclk_div, pclk_div, h2p_div;

	/* pll enter slow-mode */
	rk_clrsetreg(&cru->cru_mode_con,
		     GPLL_MODE_MASK << GPLL_MODE_SHIFT |
		     CPLL_MODE_MASK << CPLL_MODE_SHIFT,
		     GPLL_MODE_SLOW << GPLL_MODE_SHIFT |
		     CPLL_MODE_SLOW << CPLL_MODE_SHIFT);

	/* init pll */
	rkclk_set_pll(cru, CLK_GENERAL, &gpll_init_cfg, has_bwadj);
	rkclk_set_pll(cru, CLK_CODEC, &cpll_init_cfg, has_bwadj);

	/* waiting for pll lock */
	while ((readl(&grf->soc_status0) &
		(SOCSTS_CPLL_LOCK | SOCSTS_GPLL_LOCK)) !=
	       (SOCSTS_CPLL_LOCK | SOCSTS_GPLL_LOCK))
		udelay(1);

	/*
	 * Select gpll as the cpu clock pll source (reparenting aclk_cpu_pre
	 * from apll to gpll) and set up the dependent divisors for the
	 * ACLK, HCLK, PCLK and AHB2APB clocks.
	 */
	aclk_div = DIV_ROUND_UP(GPLL_HZ, CPU_ACLK_HZ) - 1;
	assert((aclk_div + 1) * CPU_ACLK_HZ == GPLL_HZ && aclk_div <= 0x1f);

	rk_clrsetreg(&cru->cru_clksel_con[0],
		     CPU_ACLK_PLL_MASK << CPU_ACLK_PLL_SHIFT |
		     A9_CPU_DIV_MASK << A9_CPU_DIV_SHIFT,
		     CPU_ACLK_PLL_SELECT_GPLL << CPU_ACLK_PLL_SHIFT |
		     aclk_div << A9_CPU_DIV_SHIFT);

	hclk_div = ilog2(CPU_ACLK_HZ / CPU_HCLK_HZ);
	assert((1 << hclk_div) * CPU_HCLK_HZ == CPU_ACLK_HZ && hclk_div < 0x3);
	pclk_div = ilog2(CPU_ACLK_HZ / CPU_PCLK_HZ);
	assert((1 << pclk_div) * CPU_PCLK_HZ == CPU_ACLK_HZ && pclk_div < 0x4);
	h2p_div = ilog2(CPU_HCLK_HZ / CPU_H2P_HZ);
	assert((1 << h2p_div) * CPU_H2P_HZ == CPU_HCLK_HZ && h2p_div < 0x3);

	rk_clrsetreg(&cru->cru_clksel_con[1],
		     AHB2APB_DIV_MASK << AHB2APB_DIV_SHIFT |
		     CPU_PCLK_DIV_MASK << CPU_PCLK_DIV_SHIFT |
		     CPU_HCLK_DIV_MASK << CPU_HCLK_DIV_SHIFT,
		     h2p_div << AHB2APB_DIV_SHIFT |
		     pclk_div << CPU_PCLK_DIV_SHIFT |
		     hclk_div << CPU_HCLK_DIV_SHIFT);

	/*
	 * Select gpll as the peri clock pll source and set up the
	 * dependent divisors for the ACLK, HCLK and PCLK clocks.
	 */
	aclk_div = GPLL_HZ / PERI_ACLK_HZ - 1;
	assert((aclk_div + 1) * PERI_ACLK_HZ == GPLL_HZ && aclk_div < 0x1f);

	hclk_div = ilog2(PERI_ACLK_HZ / PERI_HCLK_HZ);
	assert((1 << hclk_div) * PERI_HCLK_HZ ==
	       PERI_ACLK_HZ && (hclk_div < 0x4));

	pclk_div = ilog2(PERI_ACLK_HZ / PERI_PCLK_HZ);
	assert((1 << pclk_div) * PERI_PCLK_HZ ==
	       PERI_ACLK_HZ && (pclk_div < 0x4));

	rk_clrsetreg(&cru->cru_clksel_con[10],
		     PERI_PCLK_DIV_MASK << PERI_PCLK_DIV_SHIFT |
		     PERI_HCLK_DIV_MASK << PERI_HCLK_DIV_SHIFT |
		     PERI_ACLK_DIV_MASK << PERI_ACLK_DIV_SHIFT,
		     PERI_SEL_GPLL << PERI_SEL_PLL_SHIFT |
		     pclk_div << PERI_PCLK_DIV_SHIFT |
		     hclk_div << PERI_HCLK_DIV_SHIFT |
		     aclk_div << PERI_ACLK_DIV_SHIFT);

	/* PLL enter normal-mode */
	rk_clrsetreg(&cru->cru_mode_con,
		     GPLL_MODE_MASK << GPLL_MODE_SHIFT |
		     CPLL_MODE_MASK << CPLL_MODE_SHIFT,
		     GPLL_MODE_NORMAL << GPLL_MODE_SHIFT |
		     CPLL_MODE_NORMAL << CPLL_MODE_SHIFT);

	rockchip_mmc_set_clk(cru, PERI_HCLK_HZ, HCLK_SDMMC, 16000000);
}
#endif

static ulong rk3188_clk_get_rate(struct clk *clk)
{
	struct rk3188_clk_priv *priv = dev_get_priv(clk->dev);
	ulong new_rate, gclk_rate;

	gclk_rate = rkclk_pll_get_rate(priv->cru, CLK_GENERAL);
	switch (clk->id) {
	case 1 ... 4:
		new_rate = rkclk_pll_get_rate(priv->cru, clk->id);
		break;
	case HCLK_EMMC:
	case HCLK_SDMMC:
	case HCLK_SDIO:
	case SCLK_EMMC:
	case SCLK_SDMMC:
	case SCLK_SDIO:
		new_rate = rockchip_mmc_get_clk(priv->cru, PERI_HCLK_HZ,
						clk->id);
		break;
	case SCLK_SPI0:
	case SCLK_SPI1:
		new_rate = rockchip_spi_get_clk(priv->cru, PERI_PCLK_HZ,
						clk->id);
		break;
	case PCLK_I2C0:
	case PCLK_I2C1:
	case PCLK_I2C2:
	case PCLK_I2C3:
	case PCLK_I2C4:
		return gclk_rate;
	default:
		return -ENOENT;
	}

	return new_rate;
}

static ulong rk3188_clk_set_rate(struct clk *clk, ulong rate)
{
	struct rk3188_clk_priv *priv = dev_get_priv(clk->dev);
	struct rk3188_cru *cru = priv->cru;
	ulong new_rate;

	switch (clk->id) {
	case PLL_APLL:
		new_rate = rkclk_configure_cpu(priv->cru, priv->grf, rate,
					       priv->has_bwadj);
		break;
	case CLK_DDR:
		new_rate = rkclk_configure_ddr(priv->cru, priv->grf, rate,
					       priv->has_bwadj);
		break;
	case HCLK_EMMC:
	case HCLK_SDMMC:
	case HCLK_SDIO:
	case SCLK_EMMC:
	case SCLK_SDMMC:
	case SCLK_SDIO:
		new_rate = rockchip_mmc_set_clk(cru, PERI_HCLK_HZ,
						clk->id, rate);
		break;
	case SCLK_SPI0:
	case SCLK_SPI1:
		new_rate = rockchip_spi_set_clk(cru, PERI_PCLK_HZ,
						clk->id, rate);
		break;
	default:
		return -ENOENT;
	}

	return new_rate;
}

static struct clk_ops rk3188_clk_ops = {
	.get_rate = rk3188_clk_get_rate,
	.set_rate = rk3188_clk_set_rate,
};

static int rk3188_clk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3188_clk_priv *priv = dev_get_priv(dev);

	priv->cru = dev_read_addr_ptr(dev);
#endif

	return 0;
}

static int rk3188_clk_probe(struct udevice *dev)
{
	struct rk3188_clk_priv *priv = dev_get_priv(dev);
	enum rk3188_clk_type type = dev_get_driver_data(dev);

	priv->grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF);
	if (IS_ERR(priv->grf))
		return PTR_ERR(priv->grf);
	priv->has_bwadj = (type == RK3188A_CRU) ? 1 : 0;

#ifdef CONFIG_SPL_BUILD
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3188_clk_plat *plat = dev_get_platdata(dev);

	priv->cru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif

	rkclk_init(priv->cru, priv->grf, priv->has_bwadj);

	/* Init CPU frequency */
	rkclk_configure_cpu(priv->cru, priv->grf, APLL_HZ, priv->has_bwadj);
#endif

	return 0;
}

static int rk3188_clk_bind(struct udevice *dev)
{
	int ret;
	struct udevice *sys_child;
	struct sysreset_reg *priv;

	/* The reset driver does not have a device node, so bind it here */
	ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
				 &sys_child);
	if (ret) {
		debug("Warning: No sysreset driver: ret=%d\n", ret);
	} else {
		priv = malloc(sizeof(struct sysreset_reg));
		priv->glb_srst_fst_value = offsetof(struct rk3188_cru,
						    cru_glb_srst_fst_value);
		priv->glb_srst_snd_value = offsetof(struct rk3188_cru,
						    cru_glb_srst_snd_value);
		sys_child->priv = priv;
	}

#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
	ret = offsetof(struct rk3188_cru, cru_softrst_con[0]);
	ret = rockchip_reset_bind(dev, ret, 9);
	if (ret)
602 debug("Warning: software reset driver bind faile\n");
#endif

	return 0;
}

static const struct udevice_id rk3188_clk_ids[] = {
	{ .compatible = "rockchip,rk3188-cru", .data = RK3188_CRU },
	{ .compatible = "rockchip,rk3188a-cru", .data = RK3188A_CRU },
	{ }
};

U_BOOT_DRIVER(rockchip_rk3188_cru) = {
	.name = "rockchip_rk3188_cru",
	.id = UCLASS_CLK,
	.of_match = rk3188_clk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3188_clk_priv),
	.platdata_auto_alloc_size = sizeof(struct rk3188_clk_plat),
	.ops = &rk3188_clk_ops,
	.bind = rk3188_clk_bind,
	.ofdata_to_platdata = rk3188_clk_ofdata_to_platdata,
	.probe = rk3188_clk_probe,
};