// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2015 Google, Inc
 * (C) 2017 Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <mapmem.h>
#include <syscon.h>
#include <bitfield.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/cru_rk3399.h>
#include <asm/arch/hardware.h>
#include <dm/lists.h>
#include <dt-bindings/clock/rk3399-cru.h>

#if CONFIG_IS_ENABLED(OF_PLATDATA)
struct rk3399_clk_plat {
	struct dtd_rockchip_rk3399_cru dtd;
};

struct rk3399_pmuclk_plat {
	struct dtd_rockchip_rk3399_pmucru dtd;
};
#endif

struct pll_div {
	u32 refdiv;
	u32 fbdiv;
	u32 postdiv1;
	u32 postdiv2;
	u32 frac;
};

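/*
 * Divider fields in the CRU hold (divisor - 1): a register value of 0 means
 * divide by 1. The helpers below convert between register values and rates
 * relative to the parent clock.
 */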
#define RATE_TO_DIV(input_rate, output_rate) \
	((input_rate) / (output_rate) - 1)
#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))

#define PLL_DIVISORS(hz, _refdiv, _postdiv1, _postdiv2) {\
	.refdiv = _refdiv,\
	.fbdiv = (u32)((u64)hz * _refdiv * _postdiv1 * _postdiv2 / OSC_HZ),\
	.postdiv1 = _postdiv1, .postdiv2 = _postdiv2}

#if defined(CONFIG_SPL_BUILD)
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 2, 2, 1);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 2, 2);
#else
static const struct pll_div ppll_init_cfg = PLL_DIVISORS(PPLL_HZ, 2, 2, 1);
#endif

static const struct pll_div apll_l_1600_cfg = PLL_DIVISORS(1600*MHz, 3, 1, 1);
static const struct pll_div apll_l_600_cfg = PLL_DIVISORS(600*MHz, 1, 2, 1);

static const struct pll_div *apll_l_cfgs[] = {
	[APLL_L_1600_MHZ] = &apll_l_1600_cfg,
	[APLL_L_600_MHZ] = &apll_l_600_cfg,
};

static const struct pll_div apll_b_600_cfg = PLL_DIVISORS(600*MHz, 1, 2, 1);
static const struct pll_div *apll_b_cfgs[] = {
	[APLL_B_600_MHZ] = &apll_b_600_cfg,
};

enum {
	/* PLL_CON0 */
	PLL_FBDIV_MASK = 0xfff,
	PLL_FBDIV_SHIFT = 0,

	/* PLL_CON1 */
	PLL_POSTDIV2_SHIFT = 12,
	PLL_POSTDIV2_MASK = 0x7 << PLL_POSTDIV2_SHIFT,
	PLL_POSTDIV1_SHIFT = 8,
	PLL_POSTDIV1_MASK = 0x7 << PLL_POSTDIV1_SHIFT,
	PLL_REFDIV_MASK = 0x3f,
	PLL_REFDIV_SHIFT = 0,

	/* PLL_CON2 */
	PLL_LOCK_STATUS_SHIFT = 31,
	PLL_LOCK_STATUS_MASK = 1 << PLL_LOCK_STATUS_SHIFT,
	PLL_FRACDIV_MASK = 0xffffff,
	PLL_FRACDIV_SHIFT = 0,

	/* PLL_CON3 */
	PLL_MODE_SHIFT = 8,
	PLL_MODE_MASK = 3 << PLL_MODE_SHIFT,
	PLL_MODE_SLOW = 0,
	PLL_MODE_NORM,
	PLL_MODE_DEEP,
	PLL_DSMPD_SHIFT = 3,
	PLL_DSMPD_MASK = 1 << PLL_DSMPD_SHIFT,
	PLL_INTEGER_MODE = 1,

	/* PMUCRU_CLKSEL_CON0 */
	PMU_PCLK_DIV_CON_MASK = 0x1f,
	PMU_PCLK_DIV_CON_SHIFT = 0,

	/* PMUCRU_CLKSEL_CON1 */
	SPI3_PLL_SEL_SHIFT = 7,
	SPI3_PLL_SEL_MASK = 1 << SPI3_PLL_SEL_SHIFT,
	SPI3_PLL_SEL_24M = 0,
	SPI3_PLL_SEL_PPLL = 1,
	SPI3_DIV_CON_SHIFT = 0x0,
	SPI3_DIV_CON_MASK = 0x7f,

	/* PMUCRU_CLKSEL_CON2 */
	I2C_DIV_CON_MASK = 0x7f,
	CLK_I2C8_DIV_CON_SHIFT = 8,
	CLK_I2C0_DIV_CON_SHIFT = 0,

	/* PMUCRU_CLKSEL_CON3 */
	CLK_I2C4_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON0 */
	ACLKM_CORE_L_DIV_CON_SHIFT = 8,
	ACLKM_CORE_L_DIV_CON_MASK = 0x1f << ACLKM_CORE_L_DIV_CON_SHIFT,
	CLK_CORE_L_PLL_SEL_SHIFT = 6,
	CLK_CORE_L_PLL_SEL_MASK = 3 << CLK_CORE_L_PLL_SEL_SHIFT,
	CLK_CORE_L_PLL_SEL_ALPLL = 0x0,
	CLK_CORE_L_PLL_SEL_ABPLL = 0x1,
	CLK_CORE_L_PLL_SEL_DPLL = 0x10,
	CLK_CORE_L_PLL_SEL_GPLL = 0x11,
	CLK_CORE_L_DIV_MASK = 0x1f,
	CLK_CORE_L_DIV_SHIFT = 0,

	/* CLKSEL_CON1 */
	PCLK_DBG_L_DIV_SHIFT = 0x8,
	PCLK_DBG_L_DIV_MASK = 0x1f << PCLK_DBG_L_DIV_SHIFT,
	ATCLK_CORE_L_DIV_SHIFT = 0,
	ATCLK_CORE_L_DIV_MASK = 0x1f << ATCLK_CORE_L_DIV_SHIFT,

	/* CLKSEL_CON2 */
	ACLKM_CORE_B_DIV_CON_SHIFT = 8,
	ACLKM_CORE_B_DIV_CON_MASK = 0x1f << ACLKM_CORE_B_DIV_CON_SHIFT,
	CLK_CORE_B_PLL_SEL_SHIFT = 6,
	CLK_CORE_B_PLL_SEL_MASK = 3 << CLK_CORE_B_PLL_SEL_SHIFT,
	CLK_CORE_B_PLL_SEL_ALPLL = 0x0,
	CLK_CORE_B_PLL_SEL_ABPLL = 0x1,
	CLK_CORE_B_PLL_SEL_DPLL = 0x10,
	CLK_CORE_B_PLL_SEL_GPLL = 0x11,
	CLK_CORE_B_DIV_MASK = 0x1f,
	CLK_CORE_B_DIV_SHIFT = 0,

	/* CLKSEL_CON3 */
	PCLK_DBG_B_DIV_SHIFT = 0x8,
	PCLK_DBG_B_DIV_MASK = 0x1f << PCLK_DBG_B_DIV_SHIFT,
	ATCLK_CORE_B_DIV_SHIFT = 0,
	ATCLK_CORE_B_DIV_MASK = 0x1f << ATCLK_CORE_B_DIV_SHIFT,

	/* CLKSEL_CON14 */
	PCLK_PERIHP_DIV_CON_SHIFT = 12,
	PCLK_PERIHP_DIV_CON_MASK = 0x7 << PCLK_PERIHP_DIV_CON_SHIFT,
	HCLK_PERIHP_DIV_CON_SHIFT = 8,
	HCLK_PERIHP_DIV_CON_MASK = 3 << HCLK_PERIHP_DIV_CON_SHIFT,
	ACLK_PERIHP_PLL_SEL_SHIFT = 7,
	ACLK_PERIHP_PLL_SEL_MASK = 1 << ACLK_PERIHP_PLL_SEL_SHIFT,
	ACLK_PERIHP_PLL_SEL_CPLL = 0,
	ACLK_PERIHP_PLL_SEL_GPLL = 1,
	ACLK_PERIHP_DIV_CON_SHIFT = 0,
	ACLK_PERIHP_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON21 */
	ACLK_EMMC_PLL_SEL_SHIFT = 7,
	ACLK_EMMC_PLL_SEL_MASK = 0x1 << ACLK_EMMC_PLL_SEL_SHIFT,
	ACLK_EMMC_PLL_SEL_GPLL = 0x1,
	ACLK_EMMC_DIV_CON_SHIFT = 0,
	ACLK_EMMC_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON22 */
	CLK_EMMC_PLL_SHIFT = 8,
	CLK_EMMC_PLL_MASK = 0x7 << CLK_EMMC_PLL_SHIFT,
	CLK_EMMC_PLL_SEL_GPLL = 0x1,
	CLK_EMMC_PLL_SEL_24M = 0x5,
	CLK_EMMC_DIV_CON_SHIFT = 0,
	CLK_EMMC_DIV_CON_MASK = 0x7f << CLK_EMMC_DIV_CON_SHIFT,

	/* CLKSEL_CON23 */
	PCLK_PERILP0_DIV_CON_SHIFT = 12,
	PCLK_PERILP0_DIV_CON_MASK = 0x7 << PCLK_PERILP0_DIV_CON_SHIFT,
	HCLK_PERILP0_DIV_CON_SHIFT = 8,
	HCLK_PERILP0_DIV_CON_MASK = 3 << HCLK_PERILP0_DIV_CON_SHIFT,
	ACLK_PERILP0_PLL_SEL_SHIFT = 7,
	ACLK_PERILP0_PLL_SEL_MASK = 1 << ACLK_PERILP0_PLL_SEL_SHIFT,
	ACLK_PERILP0_PLL_SEL_CPLL = 0,
	ACLK_PERILP0_PLL_SEL_GPLL = 1,
	ACLK_PERILP0_DIV_CON_SHIFT = 0,
	ACLK_PERILP0_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON25 */
	PCLK_PERILP1_DIV_CON_SHIFT = 8,
	PCLK_PERILP1_DIV_CON_MASK = 0x7 << PCLK_PERILP1_DIV_CON_SHIFT,
	HCLK_PERILP1_PLL_SEL_SHIFT = 7,
	HCLK_PERILP1_PLL_SEL_MASK = 1 << HCLK_PERILP1_PLL_SEL_SHIFT,
	HCLK_PERILP1_PLL_SEL_CPLL = 0,
	HCLK_PERILP1_PLL_SEL_GPLL = 1,
	HCLK_PERILP1_DIV_CON_SHIFT = 0,
	HCLK_PERILP1_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON26 */
	CLK_SARADC_DIV_CON_SHIFT = 8,
	CLK_SARADC_DIV_CON_MASK = GENMASK(15, 8),
	CLK_SARADC_DIV_CON_WIDTH = 8,

	/* CLKSEL_CON27 */
	CLK_TSADC_SEL_X24M = 0x0,
	CLK_TSADC_SEL_SHIFT = 15,
	CLK_TSADC_SEL_MASK = 1 << CLK_TSADC_SEL_SHIFT,
	CLK_TSADC_DIV_CON_SHIFT = 0,
	CLK_TSADC_DIV_CON_MASK = 0x3ff,

	/* CLKSEL_CON47 & CLKSEL_CON48 */
	ACLK_VOP_PLL_SEL_SHIFT = 6,
	ACLK_VOP_PLL_SEL_MASK = 0x3 << ACLK_VOP_PLL_SEL_SHIFT,
	ACLK_VOP_PLL_SEL_CPLL = 0x1,
	ACLK_VOP_DIV_CON_SHIFT = 0,
	ACLK_VOP_DIV_CON_MASK = 0x1f << ACLK_VOP_DIV_CON_SHIFT,

	/* CLKSEL_CON49 & CLKSEL_CON50 */
	DCLK_VOP_DCLK_SEL_SHIFT = 11,
	DCLK_VOP_DCLK_SEL_MASK = 1 << DCLK_VOP_DCLK_SEL_SHIFT,
	DCLK_VOP_DCLK_SEL_DIVOUT = 0,
	DCLK_VOP_PLL_SEL_SHIFT = 8,
	DCLK_VOP_PLL_SEL_MASK = 3 << DCLK_VOP_PLL_SEL_SHIFT,
	DCLK_VOP_PLL_SEL_VPLL = 0,
	DCLK_VOP_DIV_CON_MASK = 0xff,
	DCLK_VOP_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON58 */
	CLK_SPI_PLL_SEL_WIDTH = 1,
	CLK_SPI_PLL_SEL_MASK = ((1 << CLK_SPI_PLL_SEL_WIDTH) - 1),
	CLK_SPI_PLL_SEL_CPLL = 0,
	CLK_SPI_PLL_SEL_GPLL = 1,
	CLK_SPI_PLL_DIV_CON_WIDTH = 7,
	CLK_SPI_PLL_DIV_CON_MASK = ((1 << CLK_SPI_PLL_DIV_CON_WIDTH) - 1),

	CLK_SPI5_PLL_DIV_CON_SHIFT = 8,
	CLK_SPI5_PLL_SEL_SHIFT = 15,

	/* CLKSEL_CON59 */
	CLK_SPI1_PLL_SEL_SHIFT = 15,
	CLK_SPI1_PLL_DIV_CON_SHIFT = 8,
	CLK_SPI0_PLL_SEL_SHIFT = 7,
	CLK_SPI0_PLL_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON60 */
	CLK_SPI4_PLL_SEL_SHIFT = 15,
	CLK_SPI4_PLL_DIV_CON_SHIFT = 8,
	CLK_SPI2_PLL_SEL_SHIFT = 7,
	CLK_SPI2_PLL_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON61 */
	CLK_I2C_PLL_SEL_MASK = 1,
	CLK_I2C_PLL_SEL_CPLL = 0,
	CLK_I2C_PLL_SEL_GPLL = 1,
	CLK_I2C5_PLL_SEL_SHIFT = 15,
	CLK_I2C5_DIV_CON_SHIFT = 8,
	CLK_I2C1_PLL_SEL_SHIFT = 7,
	CLK_I2C1_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON62 */
	CLK_I2C6_PLL_SEL_SHIFT = 15,
	CLK_I2C6_DIV_CON_SHIFT = 8,
	CLK_I2C2_PLL_SEL_SHIFT = 7,
	CLK_I2C2_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON63 */
	CLK_I2C7_PLL_SEL_SHIFT = 15,
	CLK_I2C7_DIV_CON_SHIFT = 8,
	CLK_I2C3_PLL_SEL_SHIFT = 7,
	CLK_I2C3_DIV_CON_SHIFT = 0,

	/* CRU_SOFTRST_CON4 */
	RESETN_DDR0_REQ_SHIFT = 8,
	RESETN_DDR0_REQ_MASK = 1 << RESETN_DDR0_REQ_SHIFT,
	RESETN_DDRPHY0_REQ_SHIFT = 9,
	RESETN_DDRPHY0_REQ_MASK = 1 << RESETN_DDRPHY0_REQ_SHIFT,
	RESETN_DDR1_REQ_SHIFT = 12,
	RESETN_DDR1_REQ_MASK = 1 << RESETN_DDR1_REQ_SHIFT,
	RESETN_DDRPHY1_REQ_SHIFT = 13,
	RESETN_DDRPHY1_REQ_MASK = 1 << RESETN_DDRPHY1_REQ_SHIFT,
};

#define VCO_MAX_KHZ	(3200 * (MHz / KHz))
#define VCO_MIN_KHZ	(800 * (MHz / KHz))
#define OUTPUT_MAX_KHZ	(3200 * (MHz / KHz))
#define OUTPUT_MIN_KHZ	(16 * (MHz / KHz))

/*
 * The divider restrictions of the PLLs in integer mode; these are defined in
 * CRU_*PLL_CON0 or PMUCRU_*PLL_CON0.
 */
#define PLL_DIV_MIN	16
#define PLL_DIV_MAX	3200

/*
 * How to calculate the PLL (from TRM V0.3 Part 1 Page 63),
 * formulas also embedded within the fractional PLL Verilog model:
 * If DSMPD = 1 (DSM is disabled, "integer mode"):
 * FOUTVCO = FREF / REFDIV * FBDIV
 * FOUTPOSTDIV = FOUTVCO / POSTDIV1 / POSTDIV2
 * Where:
 * FOUTVCO = fractional PLL non-divided output frequency
 * FOUTPOSTDIV = fractional PLL divided output frequency
 *               (output of second post divider)
 * FREF = fractional PLL input reference frequency (the 24 MHz OSC_HZ input)
 * REFDIV = fractional PLL input reference clock divider
 * FBDIV = integer value programmed into the feedback divider
 */
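/*
 * Worked example (integer mode), assuming a 594 MHz GPLL target with
 * refdiv = 2, postdiv1 = 2, postdiv2 = 1 (the gpll_init_cfg above):
 * PLL_DIVISORS gives fbdiv = 594 * 2 * 2 * 1 / 24 = 99, so
 * FOUTVCO = 24 MHz / 2 * 99 = 1188 MHz and
 * FOUTPOSTDIV = 1188 MHz / 2 / 1 = 594 MHz.
 */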
static void rkclk_set_pll(u32 *pll_con, const struct pll_div *div)
{
	/* All 8 PLLs have the same VCO and output frequency range restrictions. */
	u32 vco_khz = OSC_HZ / 1000 * div->fbdiv / div->refdiv;
	u32 output_khz = vco_khz / div->postdiv1 / div->postdiv2;

	debug("PLL at %p: fbdiv=%d, refdiv=%d, postdiv1=%d, "
	      "postdiv2=%d, vco=%u khz, output=%u khz\n",
	      pll_con, div->fbdiv, div->refdiv, div->postdiv1,
	      div->postdiv2, vco_khz, output_khz);
	assert(vco_khz >= VCO_MIN_KHZ && vco_khz <= VCO_MAX_KHZ &&
	       output_khz >= OUTPUT_MIN_KHZ && output_khz <= OUTPUT_MAX_KHZ &&
	       div->fbdiv >= PLL_DIV_MIN && div->fbdiv <= PLL_DIV_MAX);

	/*
	 * When powering up or changing the PLL setting, force the PLL into
	 * slow mode first to guarantee a stable output clock.
	 */
	rk_clrsetreg(&pll_con[3], PLL_MODE_MASK,
		     PLL_MODE_SLOW << PLL_MODE_SHIFT);

	/* use integer mode */
	rk_clrsetreg(&pll_con[3], PLL_DSMPD_MASK,
		     PLL_INTEGER_MODE << PLL_DSMPD_SHIFT);

	rk_clrsetreg(&pll_con[0], PLL_FBDIV_MASK,
		     div->fbdiv << PLL_FBDIV_SHIFT);
	rk_clrsetreg(&pll_con[1],
		     PLL_POSTDIV2_MASK | PLL_POSTDIV1_MASK | PLL_REFDIV_MASK,
		     (div->postdiv2 << PLL_POSTDIV2_SHIFT) |
		     (div->postdiv1 << PLL_POSTDIV1_SHIFT) |
		     (div->refdiv << PLL_REFDIV_SHIFT));

	/* wait for the PLL to lock */
	while (!(readl(&pll_con[2]) & (1 << PLL_LOCK_STATUS_SHIFT)))
		udelay(1);

	/* return the PLL to normal mode */
	rk_clrsetreg(&pll_con[3], PLL_MODE_MASK,
		     PLL_MODE_NORM << PLL_MODE_SHIFT);
}

static int pll_para_config(u32 freq_hz, struct pll_div *div)
{
	u32 ref_khz = OSC_HZ / KHz, refdiv, fbdiv = 0;
	u32 postdiv1, postdiv2 = 1;
	u32 fref_khz;
	u32 diff_khz, best_diff_khz;
	const u32 max_refdiv = 63, max_fbdiv = 3200, min_fbdiv = 16;
	const u32 max_postdiv1 = 7, max_postdiv2 = 7;
	u32 vco_khz;
	u32 freq_khz = freq_hz / KHz;

	if (!freq_hz) {
		printf("%s: the frequency can't be 0 Hz\n", __func__);
		return -1;
	}

	postdiv1 = DIV_ROUND_UP(VCO_MIN_KHZ, freq_khz);
	if (postdiv1 > max_postdiv1) {
		postdiv2 = DIV_ROUND_UP(postdiv1, max_postdiv1);
		postdiv1 = DIV_ROUND_UP(postdiv1, postdiv2);
	}

	vco_khz = freq_khz * postdiv1 * postdiv2;

	if (vco_khz < VCO_MIN_KHZ || vco_khz > VCO_MAX_KHZ ||
	    postdiv2 > max_postdiv2) {
		printf("%s: cannot find a supported VCO for frequency %u Hz\n",
		       __func__, freq_hz);
		return -1;
	}

	div->postdiv1 = postdiv1;
	div->postdiv2 = postdiv2;

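	/*
	 * Scan all reference-divider settings for the feedback divider that
	 * brings the VCO closest to the requested rate; the loop stops early
	 * on an exact match (best_diff_khz == 0).
	 */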
	best_diff_khz = vco_khz;
	for (refdiv = 1; refdiv < max_refdiv && best_diff_khz; refdiv++) {
		fref_khz = ref_khz / refdiv;

		fbdiv = vco_khz / fref_khz;
		if ((fbdiv >= max_fbdiv) || (fbdiv <= min_fbdiv))
			continue;
		diff_khz = vco_khz - fbdiv * fref_khz;
		if (fbdiv + 1 < max_fbdiv && diff_khz > fref_khz / 2) {
			fbdiv++;
			diff_khz = fref_khz - diff_khz;
		}

		if (diff_khz >= best_diff_khz)
			continue;

		best_diff_khz = diff_khz;
		div->refdiv = refdiv;
		div->fbdiv = fbdiv;
	}

	if (best_diff_khz > 4 * (MHz / KHz)) {
		printf("%s: failed to match output frequency %u Hz, "
		       "difference is %u Hz, exceeding 4 MHz\n", __func__,
		       freq_hz, best_diff_khz * KHz);
		return -1;
	}
	return 0;
}

void rk3399_configure_cpu_l(struct rk3399_cru *cru,
			    enum apll_l_frequencies apll_l_freq)
{
	u32 aclkm_div;
	u32 pclk_dbg_div;
	u32 atclk_div;

	/* Setup cluster L */
	rkclk_set_pll(&cru->apll_l_con[0], apll_l_cfgs[apll_l_freq]);

	aclkm_div = LPLL_HZ / ACLKM_CORE_L_HZ - 1;
	assert((aclkm_div + 1) * ACLKM_CORE_L_HZ == LPLL_HZ &&
	       aclkm_div < 0x1f);

	pclk_dbg_div = LPLL_HZ / PCLK_DBG_L_HZ - 1;
	assert((pclk_dbg_div + 1) * PCLK_DBG_L_HZ == LPLL_HZ &&
	       pclk_dbg_div < 0x1f);

	atclk_div = LPLL_HZ / ATCLK_CORE_L_HZ - 1;
	assert((atclk_div + 1) * ATCLK_CORE_L_HZ == LPLL_HZ &&
	       atclk_div < 0x1f);

	rk_clrsetreg(&cru->clksel_con[0],
		     ACLKM_CORE_L_DIV_CON_MASK | CLK_CORE_L_PLL_SEL_MASK |
		     CLK_CORE_L_DIV_MASK,
		     aclkm_div << ACLKM_CORE_L_DIV_CON_SHIFT |
		     CLK_CORE_L_PLL_SEL_ALPLL << CLK_CORE_L_PLL_SEL_SHIFT |
		     0 << CLK_CORE_L_DIV_SHIFT);

	rk_clrsetreg(&cru->clksel_con[1],
		     PCLK_DBG_L_DIV_MASK | ATCLK_CORE_L_DIV_MASK,
		     pclk_dbg_div << PCLK_DBG_L_DIV_SHIFT |
		     atclk_div << ATCLK_CORE_L_DIV_SHIFT);
}

void rk3399_configure_cpu_b(struct rk3399_cru *cru,
			    enum apll_b_frequencies apll_b_freq)
{
	u32 aclkm_div;
	u32 pclk_dbg_div;
	u32 atclk_div;

	/* Setup cluster B */
	rkclk_set_pll(&cru->apll_b_con[0], apll_b_cfgs[apll_b_freq]);

	aclkm_div = BPLL_HZ / ACLKM_CORE_B_HZ - 1;
	assert((aclkm_div + 1) * ACLKM_CORE_B_HZ == BPLL_HZ &&
	       aclkm_div < 0x1f);

	pclk_dbg_div = BPLL_HZ / PCLK_DBG_B_HZ - 1;
	assert((pclk_dbg_div + 1) * PCLK_DBG_B_HZ == BPLL_HZ &&
	       pclk_dbg_div < 0x1f);

	atclk_div = BPLL_HZ / ATCLK_CORE_B_HZ - 1;
	assert((atclk_div + 1) * ATCLK_CORE_B_HZ == BPLL_HZ &&
	       atclk_div < 0x1f);

	rk_clrsetreg(&cru->clksel_con[2],
		     ACLKM_CORE_B_DIV_CON_MASK | CLK_CORE_B_PLL_SEL_MASK |
		     CLK_CORE_B_DIV_MASK,
		     aclkm_div << ACLKM_CORE_B_DIV_CON_SHIFT |
		     CLK_CORE_B_PLL_SEL_ABPLL << CLK_CORE_B_PLL_SEL_SHIFT |
		     0 << CLK_CORE_B_DIV_SHIFT);

	rk_clrsetreg(&cru->clksel_con[3],
		     PCLK_DBG_B_DIV_MASK | ATCLK_CORE_B_DIV_MASK,
		     pclk_dbg_div << PCLK_DBG_B_DIV_SHIFT |
		     atclk_div << ATCLK_CORE_B_DIV_SHIFT);
}

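/*
 * The I2C helpers below paste the bus number into the per-bus
 * *_DIV_CON_SHIFT and *_PLL_SEL_SHIFT names, so a single macro covers every
 * I2C instance handled by the main CRU (and, further down, by the PMUCRU).
 */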
#define I2C_CLK_REG_MASK(bus) \
	(I2C_DIV_CON_MASK << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT | \
	 CLK_I2C_PLL_SEL_MASK << \
	 CLK_I2C ##bus## _PLL_SEL_SHIFT)

#define I2C_CLK_REG_VALUE(bus, clk_div) \
	((clk_div - 1) << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT | \
	 CLK_I2C_PLL_SEL_GPLL << \
	 CLK_I2C ##bus## _PLL_SEL_SHIFT)

#define I2C_CLK_DIV_VALUE(con, bus) \
	((con >> CLK_I2C ##bus## _DIV_CON_SHIFT) & \
	 I2C_DIV_CON_MASK)

#define I2C_PMUCLK_REG_MASK(bus) \
	(I2C_DIV_CON_MASK << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT)

#define I2C_PMUCLK_REG_VALUE(bus, clk_div) \
	((clk_div - 1) << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT)

static ulong rk3399_i2c_get_clk(struct rk3399_cru *cru, ulong clk_id)
{
	u32 div, con;

	switch (clk_id) {
	case SCLK_I2C1:
		con = readl(&cru->clksel_con[61]);
		div = I2C_CLK_DIV_VALUE(con, 1);
		break;
	case SCLK_I2C2:
		con = readl(&cru->clksel_con[62]);
		div = I2C_CLK_DIV_VALUE(con, 2);
		break;
	case SCLK_I2C3:
		con = readl(&cru->clksel_con[63]);
		div = I2C_CLK_DIV_VALUE(con, 3);
		break;
	case SCLK_I2C5:
		con = readl(&cru->clksel_con[61]);
		div = I2C_CLK_DIV_VALUE(con, 5);
		break;
	case SCLK_I2C6:
		con = readl(&cru->clksel_con[62]);
		div = I2C_CLK_DIV_VALUE(con, 6);
		break;
	case SCLK_I2C7:
		con = readl(&cru->clksel_con[63]);
		div = I2C_CLK_DIV_VALUE(con, 7);
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return DIV_TO_RATE(GPLL_HZ, div);
}

static ulong rk3399_i2c_set_clk(struct rk3399_cru *cru, ulong clk_id, uint hz)
{
	int src_clk_div;

	/* i2c0,4,8 source their clock from ppll; i2c1,2,3,5,6,7 from gpll */
	src_clk_div = GPLL_HZ / hz;
	assert(src_clk_div - 1 < 127);

	switch (clk_id) {
	case SCLK_I2C1:
		rk_clrsetreg(&cru->clksel_con[61], I2C_CLK_REG_MASK(1),
			     I2C_CLK_REG_VALUE(1, src_clk_div));
		break;
	case SCLK_I2C2:
		rk_clrsetreg(&cru->clksel_con[62], I2C_CLK_REG_MASK(2),
			     I2C_CLK_REG_VALUE(2, src_clk_div));
		break;
	case SCLK_I2C3:
		rk_clrsetreg(&cru->clksel_con[63], I2C_CLK_REG_MASK(3),
			     I2C_CLK_REG_VALUE(3, src_clk_div));
		break;
	case SCLK_I2C5:
		rk_clrsetreg(&cru->clksel_con[61], I2C_CLK_REG_MASK(5),
			     I2C_CLK_REG_VALUE(5, src_clk_div));
		break;
	case SCLK_I2C6:
		rk_clrsetreg(&cru->clksel_con[62], I2C_CLK_REG_MASK(6),
			     I2C_CLK_REG_VALUE(6, src_clk_div));
		break;
	case SCLK_I2C7:
		rk_clrsetreg(&cru->clksel_con[63], I2C_CLK_REG_MASK(7),
			     I2C_CLK_REG_VALUE(7, src_clk_div));
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return rk3399_i2c_get_clk(cru, clk_id);
}

/*
 * RK3399 SPI clocks have a common divider-width (7 bits) and a single bit
 * to select either CPLL or GPLL as the clock-parent. The location within
 * the enclosing CLKSEL_CON (i.e. div_shift and sel_shift) is variable.
 */

struct spi_clkreg {
	uint8_t reg;  /* CLKSEL_CON[reg] register in CRU */
	uint8_t div_shift;
	uint8_t sel_shift;
};

/*
 * The entries are numbered relative to their offset from SCLK_SPI0.
 *
 * Note that SCLK_SPI3 (which is configured via PMUCRU and requires
 * different logic) is not supported.
 */
static const struct spi_clkreg spi_clkregs[] = {
	[0] = { .reg = 59,
		.div_shift = CLK_SPI0_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI0_PLL_SEL_SHIFT, },
	[1] = { .reg = 59,
		.div_shift = CLK_SPI1_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI1_PLL_SEL_SHIFT, },
	[2] = { .reg = 60,
		.div_shift = CLK_SPI2_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI2_PLL_SEL_SHIFT, },
	[3] = { .reg = 60,
		.div_shift = CLK_SPI4_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI4_PLL_SEL_SHIFT, },
	[4] = { .reg = 58,
		.div_shift = CLK_SPI5_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI5_PLL_SEL_SHIFT, },
};

static ulong rk3399_spi_get_clk(struct rk3399_cru *cru, ulong clk_id)
{
	const struct spi_clkreg *spiclk = NULL;
	u32 div, val;

	switch (clk_id) {
	case SCLK_SPI0 ... SCLK_SPI5:
		spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
		break;

	default:
		pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
		return -EINVAL;
	}

	val = readl(&cru->clksel_con[spiclk->reg]);
	div = bitfield_extract(val, spiclk->div_shift,
			       CLK_SPI_PLL_DIV_CON_WIDTH);

	return DIV_TO_RATE(GPLL_HZ, div);
}

static ulong rk3399_spi_set_clk(struct rk3399_cru *cru, ulong clk_id, uint hz)
{
	const struct spi_clkreg *spiclk = NULL;
	int src_clk_div;

	src_clk_div = DIV_ROUND_UP(GPLL_HZ, hz) - 1;
	assert(src_clk_div < 128);

	switch (clk_id) {
	case SCLK_SPI1 ... SCLK_SPI5:
		spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
		break;

	default:
		pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
		return -EINVAL;
	}

	rk_clrsetreg(&cru->clksel_con[spiclk->reg],
		     ((CLK_SPI_PLL_DIV_CON_MASK << spiclk->div_shift) |
		      (CLK_SPI_PLL_SEL_GPLL << spiclk->sel_shift)),
		     ((src_clk_div << spiclk->div_shift) |
		      (CLK_SPI_PLL_SEL_GPLL << spiclk->sel_shift)));

	return rk3399_spi_get_clk(cru, clk_id);
}

static ulong rk3399_vop_set_clk(struct rk3399_cru *cru, ulong clk_id, u32 hz)
{
	struct pll_div vpll_config = {0};
	int aclk_vop = 198 * MHz;
	void *aclkreg_addr, *dclkreg_addr;
	u32 div;

	switch (clk_id) {
	case DCLK_VOP0:
		aclkreg_addr = &cru->clksel_con[47];
		dclkreg_addr = &cru->clksel_con[49];
		break;
	case DCLK_VOP1:
		aclkreg_addr = &cru->clksel_con[48];
		dclkreg_addr = &cru->clksel_con[50];
		break;
	default:
		return -EINVAL;
	}
	/* vop aclk source clk: cpll */
	div = CPLL_HZ / aclk_vop;
	assert(div - 1 < 32);

	rk_clrsetreg(aclkreg_addr,
		     ACLK_VOP_PLL_SEL_MASK | ACLK_VOP_DIV_CON_MASK,
		     ACLK_VOP_PLL_SEL_CPLL << ACLK_VOP_PLL_SEL_SHIFT |
		     (div - 1) << ACLK_VOP_DIV_CON_SHIFT);

	/* vop dclk is sourced from vpll and equals vpll (i.e. div == 1) */
	if (pll_para_config(hz, &vpll_config))
		return -1;

	rkclk_set_pll(&cru->vpll_con[0], &vpll_config);

	rk_clrsetreg(dclkreg_addr,
		     DCLK_VOP_DCLK_SEL_MASK | DCLK_VOP_PLL_SEL_MASK |
		     DCLK_VOP_DIV_CON_MASK,
		     DCLK_VOP_DCLK_SEL_DIVOUT << DCLK_VOP_DCLK_SEL_SHIFT |
		     DCLK_VOP_PLL_SEL_VPLL << DCLK_VOP_PLL_SEL_SHIFT |
		     (1 - 1) << DCLK_VOP_DIV_CON_SHIFT);

	return hz;
}

static ulong rk3399_mmc_get_clk(struct rk3399_cru *cru, uint clk_id)
{
	u32 div, con;

	switch (clk_id) {
	case HCLK_SDMMC:
	case SCLK_SDMMC:
		con = readl(&cru->clksel_con[16]);
		/* the dwmmc controller has an internal divider of 2 */
		div = 2;
		break;
	case SCLK_EMMC:
		con = readl(&cru->clksel_con[21]);
		div = 1;
		break;
	default:
		return -EINVAL;
	}

	div *= (con & CLK_EMMC_DIV_CON_MASK) >> CLK_EMMC_DIV_CON_SHIFT;
	if ((con & CLK_EMMC_PLL_MASK) >> CLK_EMMC_PLL_SHIFT
	    == CLK_EMMC_PLL_SEL_24M)
		return DIV_TO_RATE(OSC_HZ, div);
	else
		return DIV_TO_RATE(GPLL_HZ, div);
}

static ulong rk3399_mmc_set_clk(struct rk3399_cru *cru,
				ulong clk_id, ulong set_rate)
{
	int src_clk_div;
	int aclk_emmc = 198 * MHz;

	switch (clk_id) {
	case HCLK_SDMMC:
	case SCLK_SDMMC:
		/*
		 * Select clk_sdmmc source from GPLL by default; the mmc
		 * clock is divided by 2 internally, so request double the
		 * rate from the CRU.
		 */
		src_clk_div = DIV_ROUND_UP(GPLL_HZ / 2, set_rate);

		if (src_clk_div > 128) {
			/* use the 24MHz source for a 400KHz card clock */
			src_clk_div = DIV_ROUND_UP(OSC_HZ / 2, set_rate);
			assert(src_clk_div - 1 < 128);
			rk_clrsetreg(&cru->clksel_con[16],
				     CLK_EMMC_PLL_MASK | CLK_EMMC_DIV_CON_MASK,
				     CLK_EMMC_PLL_SEL_24M << CLK_EMMC_PLL_SHIFT |
				     (src_clk_div - 1) << CLK_EMMC_DIV_CON_SHIFT);
		} else {
			rk_clrsetreg(&cru->clksel_con[16],
				     CLK_EMMC_PLL_MASK | CLK_EMMC_DIV_CON_MASK,
				     CLK_EMMC_PLL_SEL_GPLL << CLK_EMMC_PLL_SHIFT |
				     (src_clk_div - 1) << CLK_EMMC_DIV_CON_SHIFT);
		}
		break;
	case SCLK_EMMC:
		/* Select aclk_emmc source from GPLL */
		src_clk_div = DIV_ROUND_UP(GPLL_HZ, aclk_emmc);
		assert(src_clk_div - 1 < 32);

		rk_clrsetreg(&cru->clksel_con[21],
			     ACLK_EMMC_PLL_SEL_MASK | ACLK_EMMC_DIV_CON_MASK,
			     ACLK_EMMC_PLL_SEL_GPLL << ACLK_EMMC_PLL_SEL_SHIFT |
			     (src_clk_div - 1) << ACLK_EMMC_DIV_CON_SHIFT);

		/* Select clk_emmc source from GPLL too */
		src_clk_div = DIV_ROUND_UP(GPLL_HZ, set_rate);
		assert(src_clk_div - 1 < 128);

		rk_clrsetreg(&cru->clksel_con[22],
			     CLK_EMMC_PLL_MASK | CLK_EMMC_DIV_CON_MASK,
			     CLK_EMMC_PLL_SEL_GPLL << CLK_EMMC_PLL_SHIFT |
			     (src_clk_div - 1) << CLK_EMMC_DIV_CON_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	return rk3399_mmc_get_clk(cru, clk_id);
}

static ulong rk3399_gmac_set_clk(struct rk3399_cru *cru, ulong rate)
{
	ulong ret;

	/*
	 * The RGMII CLK can either be derived from an external "clkin"
	 * or generated internally by a divider from SCLK_MAC.
	 */
	if (readl(&cru->clksel_con[19]) & BIT(4)) {
		/* An external clock will always generate the right rate... */
		ret = rate;
	} else {
		/*
		 * No platform uses an internal clock to date.
		 * Implement this once it becomes necessary and print an error
		 * if someone tries to use it (while it remains unimplemented).
		 */
		pr_err("%s: internal clock is UNIMPLEMENTED\n", __func__);
		ret = 0;
	}

	return ret;
}

#define PMUSGRF_DDR_RGN_CON16 0xff330040
static ulong rk3399_ddr_set_clk(struct rk3399_cru *cru,
				ulong set_rate)
{
	struct pll_div dpll_cfg;

	/* IC ECO bug, need to set this register */
	writel(0xc000c000, PMUSGRF_DDR_RGN_CON16);

	/* clk_ddrc == DPLL = 24MHz / refdiv * fbdiv / postdiv1 / postdiv2 */
	switch (set_rate) {
	case 200 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 1, .fbdiv = 50, .postdiv1 = 6, .postdiv2 = 1};
		break;
	case 300 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 2, .fbdiv = 100, .postdiv1 = 4, .postdiv2 = 1};
		break;
	case 666 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 2, .fbdiv = 111, .postdiv1 = 2, .postdiv2 = 1};
		break;
	case 800 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 1, .fbdiv = 100, .postdiv1 = 3, .postdiv2 = 1};
		break;
	case 933 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 1, .fbdiv = 116, .postdiv1 = 3, .postdiv2 = 1};
		break;
	default:
		pr_err("Unsupported SDRAM frequency %ld!\n", set_rate);
	}
	rkclk_set_pll(&cru->dpll_con[0], &dpll_cfg);

	return set_rate;
}

static ulong rk3399_saradc_get_clk(struct rk3399_cru *cru)
{
	u32 div, val;

	val = readl(&cru->clksel_con[26]);
	div = bitfield_extract(val, CLK_SARADC_DIV_CON_SHIFT,
			       CLK_SARADC_DIV_CON_WIDTH);

	return DIV_TO_RATE(OSC_HZ, div);
}

static ulong rk3399_saradc_set_clk(struct rk3399_cru *cru, uint hz)
{
	int src_clk_div;

	src_clk_div = DIV_ROUND_UP(OSC_HZ, hz) - 1;
	assert(src_clk_div < 128);

	rk_clrsetreg(&cru->clksel_con[26],
		     CLK_SARADC_DIV_CON_MASK,
		     src_clk_div << CLK_SARADC_DIV_CON_SHIFT);

	return rk3399_saradc_get_clk(cru);
}

static ulong rk3399_clk_get_rate(struct clk *clk)
{
	struct rk3399_clk_priv *priv = dev_get_priv(clk->dev);
	ulong rate = 0;

	switch (clk->id) {
	case 0 ... 63:
		return 0;
	case HCLK_SDMMC:
	case SCLK_SDMMC:
	case SCLK_EMMC:
		rate = rk3399_mmc_get_clk(priv->cru, clk->id);
		break;
	case SCLK_I2C1:
	case SCLK_I2C2:
	case SCLK_I2C3:
	case SCLK_I2C5:
	case SCLK_I2C6:
	case SCLK_I2C7:
		rate = rk3399_i2c_get_clk(priv->cru, clk->id);
		break;
	case SCLK_SPI0...SCLK_SPI5:
		rate = rk3399_spi_get_clk(priv->cru, clk->id);
		break;
	case SCLK_UART0:
	case SCLK_UART2:
		return 24000000;
	case PCLK_HDMI_CTRL:
		break;
	case DCLK_VOP0:
	case DCLK_VOP1:
		break;
	case PCLK_EFUSE1024NS:
		break;
	case SCLK_SARADC:
		rate = rk3399_saradc_get_clk(priv->cru);
		break;
	default:
		return -ENOENT;
	}

	return rate;
}

static ulong rk3399_clk_set_rate(struct clk *clk, ulong rate)
{
	struct rk3399_clk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	switch (clk->id) {
	case 0 ... 63:
		return 0;

	case ACLK_PERIHP:
	case HCLK_PERIHP:
	case PCLK_PERIHP:
		return 0;

	case ACLK_PERILP0:
	case HCLK_PERILP0:
	case PCLK_PERILP0:
		return 0;

	case ACLK_CCI:
		return 0;

	case HCLK_PERILP1:
	case PCLK_PERILP1:
		return 0;

	case HCLK_SDMMC:
	case SCLK_SDMMC:
	case SCLK_EMMC:
		ret = rk3399_mmc_set_clk(priv->cru, clk->id, rate);
		break;
	case SCLK_MAC:
		ret = rk3399_gmac_set_clk(priv->cru, rate);
		break;
	case SCLK_I2C1:
	case SCLK_I2C2:
	case SCLK_I2C3:
	case SCLK_I2C5:
	case SCLK_I2C6:
	case SCLK_I2C7:
		ret = rk3399_i2c_set_clk(priv->cru, clk->id, rate);
		break;
	case SCLK_SPI0...SCLK_SPI5:
		ret = rk3399_spi_set_clk(priv->cru, clk->id, rate);
		break;
	case PCLK_HDMI_CTRL:
	case PCLK_VIO_GRF:
		/* the PCLK gates for video are enabled by default */
		break;
	case DCLK_VOP0:
	case DCLK_VOP1:
		ret = rk3399_vop_set_clk(priv->cru, clk->id, rate);
		break;
	case SCLK_DDRCLK:
		ret = rk3399_ddr_set_clk(priv->cru, rate);
		break;
	case PCLK_EFUSE1024NS:
		break;
	case SCLK_SARADC:
		ret = rk3399_saradc_set_clk(priv->cru, rate);
		break;
	default:
		return -ENOENT;
	}

	return ret;
}

static int __maybe_unused rk3399_gmac_set_parent(struct clk *clk,
						 struct clk *parent)
{
	struct rk3399_clk_priv *priv = dev_get_priv(clk->dev);
	const char *clock_output_name;
	int ret;

	/*
	 * If the requested parent is in the same clock-controller and
	 * the id is SCLK_MAC ("clk_gmac"), switch to the internal clock.
	 */
	if ((parent->dev == clk->dev) && (parent->id == SCLK_MAC)) {
		debug("%s: switching RGMII to SCLK_MAC\n", __func__);
		rk_clrreg(&priv->cru->clksel_con[19], BIT(4));
		return 0;
	}

	/*
	 * Otherwise, we need to check the clock-output-names of the
	 * requested parent to see if the requested id is "clkin_gmac".
	 */
	ret = dev_read_string_index(parent->dev, "clock-output-names",
				    parent->id, &clock_output_name);
	if (ret < 0)
		return -ENODATA;

	/* If this is "clkin_gmac", switch to the external clock input */
	if (!strcmp(clock_output_name, "clkin_gmac")) {
		debug("%s: switching RGMII to CLKIN\n", __func__);
		rk_setreg(&priv->cru->clksel_con[19], BIT(4));
		return 0;
	}

	return -EINVAL;
}

static int __maybe_unused rk3399_clk_set_parent(struct clk *clk,
						struct clk *parent)
{
	switch (clk->id) {
	case SCLK_RMII_SRC:
		return rk3399_gmac_set_parent(clk, parent);
	}

	debug("%s: unsupported clk %ld\n", __func__, clk->id);
	return -ENOENT;
}

static int rk3399_clk_enable(struct clk *clk)
{
	switch (clk->id) {
	case HCLK_HOST0:
	case HCLK_HOST0_ARB:
	case HCLK_HOST1:
	case HCLK_HOST1_ARB:
		return 0;

	case SCLK_MAC:
	case SCLK_MAC_RX:
	case SCLK_MAC_TX:
	case SCLK_MACREF:
	case SCLK_MACREF_OUT:
	case ACLK_GMAC:
	case PCLK_GMAC:
		/* Required to successfully probe the Designware GMAC driver */
		return 0;
	}

	debug("%s: unsupported clk %ld\n", __func__, clk->id);
	return -ENOENT;
}

static struct clk_ops rk3399_clk_ops = {
	.get_rate = rk3399_clk_get_rate,
	.set_rate = rk3399_clk_set_rate,
#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
	.set_parent = rk3399_clk_set_parent,
#endif
	.enable = rk3399_clk_enable,
};

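/*
 * SPL-only clock setup: bring up both CPU clusters at 600 MHz, program
 * GPLL/CPLL and then derive the PERIHP, PERILP0 and PERILP1 bus clocks
 * from GPLL with integer dividers.
 */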
#ifdef CONFIG_SPL_BUILD
static void rkclk_init(struct rk3399_cru *cru)
{
	u32 aclk_div;
	u32 hclk_div;
	u32 pclk_div;

	rk3399_configure_cpu_l(cru, APLL_L_600_MHZ);
	rk3399_configure_cpu_b(cru, APLL_B_600_MHZ);
	/*
	 * Some CRU registers are changed by the bootrom; reset them back to
	 * the reset/default values described in the TRM to avoid confusing
	 * the kernel. Consider these three lines a fix for that bootrom
	 * behaviour.
	 */
	rk_clrsetreg(&cru->clksel_con[12], 0xffff, 0x4101);
	rk_clrsetreg(&cru->clksel_con[19], 0xffff, 0x033f);
	rk_clrsetreg(&cru->clksel_con[56], 0x0003, 0x0003);

	/* configure gpll cpll */
	rkclk_set_pll(&cru->gpll_con[0], &gpll_init_cfg);
	rkclk_set_pll(&cru->cpll_con[0], &cpll_init_cfg);

	/* configure perihp aclk, hclk, pclk */
	aclk_div = GPLL_HZ / PERIHP_ACLK_HZ - 1;
	assert((aclk_div + 1) * PERIHP_ACLK_HZ == GPLL_HZ && aclk_div < 0x1f);

	hclk_div = PERIHP_ACLK_HZ / PERIHP_HCLK_HZ - 1;
	assert((hclk_div + 1) * PERIHP_HCLK_HZ ==
	       PERIHP_ACLK_HZ && (hclk_div < 0x4));

	pclk_div = PERIHP_ACLK_HZ / PERIHP_PCLK_HZ - 1;
	assert((pclk_div + 1) * PERIHP_PCLK_HZ ==
	       PERIHP_ACLK_HZ && (pclk_div < 0x7));

	rk_clrsetreg(&cru->clksel_con[14],
		     PCLK_PERIHP_DIV_CON_MASK | HCLK_PERIHP_DIV_CON_MASK |
		     ACLK_PERIHP_PLL_SEL_MASK | ACLK_PERIHP_DIV_CON_MASK,
		     pclk_div << PCLK_PERIHP_DIV_CON_SHIFT |
		     hclk_div << HCLK_PERIHP_DIV_CON_SHIFT |
		     ACLK_PERIHP_PLL_SEL_GPLL << ACLK_PERIHP_PLL_SEL_SHIFT |
		     aclk_div << ACLK_PERIHP_DIV_CON_SHIFT);

	/* configure perilp0 aclk, hclk, pclk */
	aclk_div = GPLL_HZ / PERILP0_ACLK_HZ - 1;
	assert((aclk_div + 1) * PERILP0_ACLK_HZ == GPLL_HZ && aclk_div < 0x1f);

	hclk_div = PERILP0_ACLK_HZ / PERILP0_HCLK_HZ - 1;
	assert((hclk_div + 1) * PERILP0_HCLK_HZ ==
	       PERILP0_ACLK_HZ && (hclk_div < 0x4));

	pclk_div = PERILP0_ACLK_HZ / PERILP0_PCLK_HZ - 1;
	assert((pclk_div + 1) * PERILP0_PCLK_HZ ==
	       PERILP0_ACLK_HZ && (pclk_div < 0x7));

	rk_clrsetreg(&cru->clksel_con[23],
		     PCLK_PERILP0_DIV_CON_MASK | HCLK_PERILP0_DIV_CON_MASK |
		     ACLK_PERILP0_PLL_SEL_MASK | ACLK_PERILP0_DIV_CON_MASK,
		     pclk_div << PCLK_PERILP0_DIV_CON_SHIFT |
		     hclk_div << HCLK_PERILP0_DIV_CON_SHIFT |
		     ACLK_PERILP0_PLL_SEL_GPLL << ACLK_PERILP0_PLL_SEL_SHIFT |
		     aclk_div << ACLK_PERILP0_DIV_CON_SHIFT);

	/* perilp1 hclk select gpll as source */
	hclk_div = GPLL_HZ / PERILP1_HCLK_HZ - 1;
	assert((hclk_div + 1) * PERILP1_HCLK_HZ ==
	       GPLL_HZ && (hclk_div < 0x1f));

	pclk_div = PERILP1_HCLK_HZ / PERILP1_HCLK_HZ - 1;
	assert((pclk_div + 1) * PERILP1_HCLK_HZ ==
	       PERILP1_HCLK_HZ && (hclk_div < 0x7));

	rk_clrsetreg(&cru->clksel_con[25],
		     PCLK_PERILP1_DIV_CON_MASK | HCLK_PERILP1_DIV_CON_MASK |
		     HCLK_PERILP1_PLL_SEL_MASK,
		     pclk_div << PCLK_PERILP1_DIV_CON_SHIFT |
		     hclk_div << HCLK_PERILP1_DIV_CON_SHIFT |
		     HCLK_PERILP1_PLL_SEL_GPLL << HCLK_PERILP1_PLL_SEL_SHIFT);
}
#endif

static int rk3399_clk_probe(struct udevice *dev)
{
#ifdef CONFIG_SPL_BUILD
	struct rk3399_clk_priv *priv = dev_get_priv(dev);

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_clk_plat *plat = dev_get_platdata(dev);

	priv->cru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif
	rkclk_init(priv->cru);
#endif
	return 0;
}

static int rk3399_clk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_clk_priv *priv = dev_get_priv(dev);

	priv->cru = dev_read_addr_ptr(dev);
#endif
	return 0;
}

static int rk3399_clk_bind(struct udevice *dev)
{
	int ret;
	struct udevice *sys_child;
	struct sysreset_reg *priv;

	/* The reset driver does not have a device node, so bind it here */
	ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
				 &sys_child);
	if (ret) {
		debug("Warning: No sysreset driver: ret=%d\n", ret);
	} else {
		priv = malloc(sizeof(struct sysreset_reg));
		priv->glb_srst_fst_value = offsetof(struct rk3399_cru,
						    glb_srst_fst_value);
		priv->glb_srst_snd_value = offsetof(struct rk3399_cru,
						    glb_srst_snd_value);
		sys_child->priv = priv;
	}

#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
	ret = offsetof(struct rk3399_cru, softrst_con[0]);
	ret = rockchip_reset_bind(dev, ret, 21);
	if (ret)
		debug("Warning: software reset driver bind failed\n");
#endif

	return 0;
}

static const struct udevice_id rk3399_clk_ids[] = {
	{ .compatible = "rockchip,rk3399-cru" },
	{ }
};

U_BOOT_DRIVER(clk_rk3399) = {
	.name = "rockchip_rk3399_cru",
	.id = UCLASS_CLK,
	.of_match = rk3399_clk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3399_clk_priv),
	.ofdata_to_platdata = rk3399_clk_ofdata_to_platdata,
	.ops = &rk3399_clk_ops,
	.bind = rk3399_clk_bind,
	.probe = rk3399_clk_probe,
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.platdata_auto_alloc_size = sizeof(struct rk3399_clk_plat),
#endif
};

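/*
 * The PMUCRU helpers below all derive their rates from PPLL: the register
 * divider value regval yields PPLL_HZ / (regval + 1), see DIV_TO_RATE().
 */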
static ulong rk3399_i2c_get_pmuclk(struct rk3399_pmucru *pmucru, ulong clk_id)
{
	u32 div, con;

	switch (clk_id) {
	case SCLK_I2C0_PMU:
		con = readl(&pmucru->pmucru_clksel[2]);
		div = I2C_CLK_DIV_VALUE(con, 0);
		break;
	case SCLK_I2C4_PMU:
		con = readl(&pmucru->pmucru_clksel[3]);
		div = I2C_CLK_DIV_VALUE(con, 4);
		break;
	case SCLK_I2C8_PMU:
		con = readl(&pmucru->pmucru_clksel[2]);
		div = I2C_CLK_DIV_VALUE(con, 8);
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return DIV_TO_RATE(PPLL_HZ, div);
}

static ulong rk3399_i2c_set_pmuclk(struct rk3399_pmucru *pmucru, ulong clk_id,
				   uint hz)
{
	int src_clk_div;

	src_clk_div = PPLL_HZ / hz;
	assert(src_clk_div - 1 < 127);

	switch (clk_id) {
	case SCLK_I2C0_PMU:
		rk_clrsetreg(&pmucru->pmucru_clksel[2], I2C_PMUCLK_REG_MASK(0),
			     I2C_PMUCLK_REG_VALUE(0, src_clk_div));
		break;
	case SCLK_I2C4_PMU:
		rk_clrsetreg(&pmucru->pmucru_clksel[3], I2C_PMUCLK_REG_MASK(4),
			     I2C_PMUCLK_REG_VALUE(4, src_clk_div));
		break;
	case SCLK_I2C8_PMU:
		rk_clrsetreg(&pmucru->pmucru_clksel[2], I2C_PMUCLK_REG_MASK(8),
			     I2C_PMUCLK_REG_VALUE(8, src_clk_div));
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return DIV_TO_RATE(PPLL_HZ, src_clk_div);
}

static ulong rk3399_pwm_get_clk(struct rk3399_pmucru *pmucru)
{
	u32 div, con;

	/* the PWM clock rate is the same as pclk_pmu */
	con = readl(&pmucru->pmucru_clksel[0]);
	div = con & PMU_PCLK_DIV_CON_MASK;

	return DIV_TO_RATE(PPLL_HZ, div);
}

static ulong rk3399_pmuclk_get_rate(struct clk *clk)
{
	struct rk3399_pmuclk_priv *priv = dev_get_priv(clk->dev);
	ulong rate = 0;

	switch (clk->id) {
	case PLL_PPLL:
		return PPLL_HZ;
	case PCLK_RKPWM_PMU:
		rate = rk3399_pwm_get_clk(priv->pmucru);
		break;
	case SCLK_I2C0_PMU:
	case SCLK_I2C4_PMU:
	case SCLK_I2C8_PMU:
		rate = rk3399_i2c_get_pmuclk(priv->pmucru, clk->id);
		break;
	default:
		return -ENOENT;
	}

	return rate;
}

static ulong rk3399_pmuclk_set_rate(struct clk *clk, ulong rate)
{
	struct rk3399_pmuclk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	switch (clk->id) {
	case PLL_PPLL:
		/*
		 * This has already been set up and we don't want/need
		 * to change it here. Accept the request though, as the
		 * device-tree has this in an 'assigned-clocks' list.
		 */
		return PPLL_HZ;
	case SCLK_I2C0_PMU:
	case SCLK_I2C4_PMU:
	case SCLK_I2C8_PMU:
		ret = rk3399_i2c_set_pmuclk(priv->pmucru, clk->id, rate);
		break;
	default:
		return -ENOENT;
	}

	return ret;
}

static struct clk_ops rk3399_pmuclk_ops = {
	.get_rate = rk3399_pmuclk_get_rate,
	.set_rate = rk3399_pmuclk_set_rate,
};

#ifndef CONFIG_SPL_BUILD
static void pmuclk_init(struct rk3399_pmucru *pmucru)
{
	u32 pclk_div;

	/* configure the pmu pll (ppll) */
	rkclk_set_pll(&pmucru->ppll_con[0], &ppll_init_cfg);

	/* configure the pmu pclk */
	pclk_div = PPLL_HZ / PMU_PCLK_HZ - 1;
	rk_clrsetreg(&pmucru->pmucru_clksel[0],
		     PMU_PCLK_DIV_CON_MASK,
		     pclk_div << PMU_PCLK_DIV_CON_SHIFT);
}
#endif

static int rk3399_pmuclk_probe(struct udevice *dev)
{
#if CONFIG_IS_ENABLED(OF_PLATDATA) || !defined(CONFIG_SPL_BUILD)
	struct rk3399_pmuclk_priv *priv = dev_get_priv(dev);
#endif

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_pmuclk_plat *plat = dev_get_platdata(dev);

	priv->pmucru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif

#ifndef CONFIG_SPL_BUILD
	pmuclk_init(priv->pmucru);
#endif
	return 0;
}

static int rk3399_pmuclk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_pmuclk_priv *priv = dev_get_priv(dev);

	priv->pmucru = dev_read_addr_ptr(dev);
#endif
	return 0;
}

static int rk3399_pmuclk_bind(struct udevice *dev)
{
#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
	int ret;

	ret = offsetof(struct rk3399_pmucru, pmucru_softrst_con[0]);
	ret = rockchip_reset_bind(dev, ret, 2);
	if (ret)
		debug("Warning: software reset driver bind failed\n");
#endif
	return 0;
}

static const struct udevice_id rk3399_pmuclk_ids[] = {
	{ .compatible = "rockchip,rk3399-pmucru" },
	{ }
};

U_BOOT_DRIVER(rockchip_rk3399_pmuclk) = {
	.name = "rockchip_rk3399_pmucru",
	.id = UCLASS_CLK,
	.of_match = rk3399_pmuclk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3399_pmuclk_priv),
	.ofdata_to_platdata = rk3399_pmuclk_ofdata_to_platdata,
	.ops = &rk3399_pmuclk_ops,
	.probe = rk3399_pmuclk_probe,
	.bind = rk3399_pmuclk_bind,
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.platdata_auto_alloc_size = sizeof(struct rk3399_pmuclk_plat),
#endif
};