// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2015 Google, Inc
 * (C) 2017 Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <mapmem.h>
#include <syscon.h>
#include <bitfield.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/cru_rk3399.h>
#include <asm/arch/hardware.h>
#include <dm/lists.h>
#include <dt-bindings/clock/rk3399-cru.h>

#if CONFIG_IS_ENABLED(OF_PLATDATA)
struct rk3399_clk_plat {
	struct dtd_rockchip_rk3399_cru dtd;
};

struct rk3399_pmuclk_plat {
	struct dtd_rockchip_rk3399_pmucru dtd;
};
#endif

struct pll_div {
	u32 refdiv;
	u32 fbdiv;
	u32 postdiv1;
	u32 postdiv2;
	u32 frac;
};

#define RATE_TO_DIV(input_rate, output_rate) \
	((input_rate) / (output_rate) - 1);
#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))

#define PLL_DIVISORS(hz, _refdiv, _postdiv1, _postdiv2) {\
	.refdiv = _refdiv,\
	.fbdiv = (u32)((u64)hz * _refdiv * _postdiv1 * _postdiv2 / OSC_HZ),\
	.postdiv1 = _postdiv1, .postdiv2 = _postdiv2};
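
/*
 * Illustrative check of the divider macros above (not an additional TRM
 * reference): with OSC_HZ being the 24 MHz crystal,
 * PLL_DIVISORS(1600 * MHz, 3, 1, 1) -- as used for apll_l_1600_cfg below --
 * computes
 *	fbdiv = 1600 MHz * 3 * 1 * 1 / 24 MHz = 200,
 * and DIV_TO_RATE() is simply the inverse of writing (div - 1) into a
 * divider field, e.g. DIV_TO_RATE(GPLL_HZ, 3) = GPLL_HZ / 4.
 */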

#if defined(CONFIG_SPL_BUILD)
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 2, 2, 1);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 2, 2);
#else
static const struct pll_div ppll_init_cfg = PLL_DIVISORS(PPLL_HZ, 2, 2, 1);
#endif

static const struct pll_div apll_l_1600_cfg = PLL_DIVISORS(1600*MHz, 3, 1, 1);
static const struct pll_div apll_l_600_cfg = PLL_DIVISORS(600*MHz, 1, 2, 1);

static const struct pll_div *apll_l_cfgs[] = {
	[APLL_L_1600_MHZ] = &apll_l_1600_cfg,
	[APLL_L_600_MHZ] = &apll_l_600_cfg,
};

enum {
	/* PLL_CON0 */
	PLL_FBDIV_MASK = 0xfff,
	PLL_FBDIV_SHIFT = 0,

	/* PLL_CON1 */
	PLL_POSTDIV2_SHIFT = 12,
	PLL_POSTDIV2_MASK = 0x7 << PLL_POSTDIV2_SHIFT,
	PLL_POSTDIV1_SHIFT = 8,
	PLL_POSTDIV1_MASK = 0x7 << PLL_POSTDIV1_SHIFT,
	PLL_REFDIV_MASK = 0x3f,
	PLL_REFDIV_SHIFT = 0,

	/* PLL_CON2 */
	PLL_LOCK_STATUS_SHIFT = 31,
	PLL_LOCK_STATUS_MASK = 1 << PLL_LOCK_STATUS_SHIFT,
	PLL_FRACDIV_MASK = 0xffffff,
	PLL_FRACDIV_SHIFT = 0,

	/* PLL_CON3 */
	PLL_MODE_SHIFT = 8,
	PLL_MODE_MASK = 3 << PLL_MODE_SHIFT,
	PLL_MODE_SLOW = 0,
	PLL_MODE_NORM,
	PLL_MODE_DEEP,
	PLL_DSMPD_SHIFT = 3,
	PLL_DSMPD_MASK = 1 << PLL_DSMPD_SHIFT,
	PLL_INTEGER_MODE = 1,

	/* PMUCRU_CLKSEL_CON0 */
	PMU_PCLK_DIV_CON_MASK = 0x1f,
	PMU_PCLK_DIV_CON_SHIFT = 0,

	/* PMUCRU_CLKSEL_CON1 */
	SPI3_PLL_SEL_SHIFT = 7,
	SPI3_PLL_SEL_MASK = 1 << SPI3_PLL_SEL_SHIFT,
	SPI3_PLL_SEL_24M = 0,
	SPI3_PLL_SEL_PPLL = 1,
	SPI3_DIV_CON_SHIFT = 0x0,
	SPI3_DIV_CON_MASK = 0x7f,

	/* PMUCRU_CLKSEL_CON2 */
	I2C_DIV_CON_MASK = 0x7f,
	CLK_I2C8_DIV_CON_SHIFT = 8,
	CLK_I2C0_DIV_CON_SHIFT = 0,

	/* PMUCRU_CLKSEL_CON3 */
	CLK_I2C4_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON0 */
	ACLKM_CORE_L_DIV_CON_SHIFT = 8,
	ACLKM_CORE_L_DIV_CON_MASK = 0x1f << ACLKM_CORE_L_DIV_CON_SHIFT,
	CLK_CORE_L_PLL_SEL_SHIFT = 6,
	CLK_CORE_L_PLL_SEL_MASK = 3 << CLK_CORE_L_PLL_SEL_SHIFT,
	CLK_CORE_L_PLL_SEL_ALPLL = 0x0,
	CLK_CORE_L_PLL_SEL_ABPLL = 0x1,
	CLK_CORE_L_PLL_SEL_DPLL = 0x10,
	CLK_CORE_L_PLL_SEL_GPLL = 0x11,
	CLK_CORE_L_DIV_MASK = 0x1f,
	CLK_CORE_L_DIV_SHIFT = 0,

	/* CLKSEL_CON1 */
	PCLK_DBG_L_DIV_SHIFT = 0x8,
	PCLK_DBG_L_DIV_MASK = 0x1f << PCLK_DBG_L_DIV_SHIFT,
	ATCLK_CORE_L_DIV_SHIFT = 0,
	ATCLK_CORE_L_DIV_MASK = 0x1f << ATCLK_CORE_L_DIV_SHIFT,

	/* CLKSEL_CON14 */
	PCLK_PERIHP_DIV_CON_SHIFT = 12,
	PCLK_PERIHP_DIV_CON_MASK = 0x7 << PCLK_PERIHP_DIV_CON_SHIFT,
	HCLK_PERIHP_DIV_CON_SHIFT = 8,
	HCLK_PERIHP_DIV_CON_MASK = 3 << HCLK_PERIHP_DIV_CON_SHIFT,
	ACLK_PERIHP_PLL_SEL_SHIFT = 7,
	ACLK_PERIHP_PLL_SEL_MASK = 1 << ACLK_PERIHP_PLL_SEL_SHIFT,
	ACLK_PERIHP_PLL_SEL_CPLL = 0,
	ACLK_PERIHP_PLL_SEL_GPLL = 1,
	ACLK_PERIHP_DIV_CON_SHIFT = 0,
	ACLK_PERIHP_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON21 */
	ACLK_EMMC_PLL_SEL_SHIFT = 7,
	ACLK_EMMC_PLL_SEL_MASK = 0x1 << ACLK_EMMC_PLL_SEL_SHIFT,
	ACLK_EMMC_PLL_SEL_GPLL = 0x1,
	ACLK_EMMC_DIV_CON_SHIFT = 0,
	ACLK_EMMC_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON22 */
	CLK_EMMC_PLL_SHIFT = 8,
	CLK_EMMC_PLL_MASK = 0x7 << CLK_EMMC_PLL_SHIFT,
	CLK_EMMC_PLL_SEL_GPLL = 0x1,
	CLK_EMMC_PLL_SEL_24M = 0x5,
	CLK_EMMC_DIV_CON_SHIFT = 0,
	CLK_EMMC_DIV_CON_MASK = 0x7f << CLK_EMMC_DIV_CON_SHIFT,

	/* CLKSEL_CON23 */
	PCLK_PERILP0_DIV_CON_SHIFT = 12,
	PCLK_PERILP0_DIV_CON_MASK = 0x7 << PCLK_PERILP0_DIV_CON_SHIFT,
	HCLK_PERILP0_DIV_CON_SHIFT = 8,
	HCLK_PERILP0_DIV_CON_MASK = 3 << HCLK_PERILP0_DIV_CON_SHIFT,
	ACLK_PERILP0_PLL_SEL_SHIFT = 7,
	ACLK_PERILP0_PLL_SEL_MASK = 1 << ACLK_PERILP0_PLL_SEL_SHIFT,
	ACLK_PERILP0_PLL_SEL_CPLL = 0,
	ACLK_PERILP0_PLL_SEL_GPLL = 1,
	ACLK_PERILP0_DIV_CON_SHIFT = 0,
	ACLK_PERILP0_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON25 */
	PCLK_PERILP1_DIV_CON_SHIFT = 8,
	PCLK_PERILP1_DIV_CON_MASK = 0x7 << PCLK_PERILP1_DIV_CON_SHIFT,
	HCLK_PERILP1_PLL_SEL_SHIFT = 7,
	HCLK_PERILP1_PLL_SEL_MASK = 1 << HCLK_PERILP1_PLL_SEL_SHIFT,
	HCLK_PERILP1_PLL_SEL_CPLL = 0,
	HCLK_PERILP1_PLL_SEL_GPLL = 1,
	HCLK_PERILP1_DIV_CON_SHIFT = 0,
	HCLK_PERILP1_DIV_CON_MASK = 0x1f,

	/* CLKSEL_CON26 */
	CLK_SARADC_DIV_CON_SHIFT = 8,
	CLK_SARADC_DIV_CON_MASK = GENMASK(15, 8),
	CLK_SARADC_DIV_CON_WIDTH = 8,

	/* CLKSEL_CON27 */
	CLK_TSADC_SEL_X24M = 0x0,
	CLK_TSADC_SEL_SHIFT = 15,
	CLK_TSADC_SEL_MASK = 1 << CLK_TSADC_SEL_SHIFT,
	CLK_TSADC_DIV_CON_SHIFT = 0,
	CLK_TSADC_DIV_CON_MASK = 0x3ff,

	/* CLKSEL_CON47 & CLKSEL_CON48 */
	ACLK_VOP_PLL_SEL_SHIFT = 6,
	ACLK_VOP_PLL_SEL_MASK = 0x3 << ACLK_VOP_PLL_SEL_SHIFT,
	ACLK_VOP_PLL_SEL_CPLL = 0x1,
	ACLK_VOP_DIV_CON_SHIFT = 0,
	ACLK_VOP_DIV_CON_MASK = 0x1f << ACLK_VOP_DIV_CON_SHIFT,

	/* CLKSEL_CON49 & CLKSEL_CON50 */
	DCLK_VOP_DCLK_SEL_SHIFT = 11,
	DCLK_VOP_DCLK_SEL_MASK = 1 << DCLK_VOP_DCLK_SEL_SHIFT,
	DCLK_VOP_DCLK_SEL_DIVOUT = 0,
	DCLK_VOP_PLL_SEL_SHIFT = 8,
	DCLK_VOP_PLL_SEL_MASK = 3 << DCLK_VOP_PLL_SEL_SHIFT,
	DCLK_VOP_PLL_SEL_VPLL = 0,
	DCLK_VOP_DIV_CON_MASK = 0xff,
	DCLK_VOP_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON58 */
	CLK_SPI_PLL_SEL_WIDTH = 1,
	CLK_SPI_PLL_SEL_MASK = ((1 << CLK_SPI_PLL_SEL_WIDTH) - 1),
	CLK_SPI_PLL_SEL_CPLL = 0,
	CLK_SPI_PLL_SEL_GPLL = 1,
	CLK_SPI_PLL_DIV_CON_WIDTH = 7,
	CLK_SPI_PLL_DIV_CON_MASK = ((1 << CLK_SPI_PLL_DIV_CON_WIDTH) - 1),

	CLK_SPI5_PLL_DIV_CON_SHIFT = 8,
	CLK_SPI5_PLL_SEL_SHIFT = 15,

	/* CLKSEL_CON59 */
	CLK_SPI1_PLL_SEL_SHIFT = 15,
	CLK_SPI1_PLL_DIV_CON_SHIFT = 8,
	CLK_SPI0_PLL_SEL_SHIFT = 7,
	CLK_SPI0_PLL_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON60 */
	CLK_SPI4_PLL_SEL_SHIFT = 15,
	CLK_SPI4_PLL_DIV_CON_SHIFT = 8,
	CLK_SPI2_PLL_SEL_SHIFT = 7,
	CLK_SPI2_PLL_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON61 */
	CLK_I2C_PLL_SEL_MASK = 1,
	CLK_I2C_PLL_SEL_CPLL = 0,
	CLK_I2C_PLL_SEL_GPLL = 1,
	CLK_I2C5_PLL_SEL_SHIFT = 15,
	CLK_I2C5_DIV_CON_SHIFT = 8,
	CLK_I2C1_PLL_SEL_SHIFT = 7,
	CLK_I2C1_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON62 */
	CLK_I2C6_PLL_SEL_SHIFT = 15,
	CLK_I2C6_DIV_CON_SHIFT = 8,
	CLK_I2C2_PLL_SEL_SHIFT = 7,
	CLK_I2C2_DIV_CON_SHIFT = 0,

	/* CLKSEL_CON63 */
	CLK_I2C7_PLL_SEL_SHIFT = 15,
	CLK_I2C7_DIV_CON_SHIFT = 8,
	CLK_I2C3_PLL_SEL_SHIFT = 7,
	CLK_I2C3_DIV_CON_SHIFT = 0,

	/* CRU_SOFTRST_CON4 */
	RESETN_DDR0_REQ_SHIFT = 8,
	RESETN_DDR0_REQ_MASK = 1 << RESETN_DDR0_REQ_SHIFT,
	RESETN_DDRPHY0_REQ_SHIFT = 9,
	RESETN_DDRPHY0_REQ_MASK = 1 << RESETN_DDRPHY0_REQ_SHIFT,
	RESETN_DDR1_REQ_SHIFT = 12,
	RESETN_DDR1_REQ_MASK = 1 << RESETN_DDR1_REQ_SHIFT,
	RESETN_DDRPHY1_REQ_SHIFT = 13,
	RESETN_DDRPHY1_REQ_MASK = 1 << RESETN_DDRPHY1_REQ_SHIFT,
};

#define VCO_MAX_KHZ	(3200 * (MHz / KHz))
#define VCO_MIN_KHZ	(800 * (MHz / KHz))
#define OUTPUT_MAX_KHZ	(3200 * (MHz / KHz))
#define OUTPUT_MIN_KHZ	(16 * (MHz / KHz))

/*
 * The divider restrictions of the PLL in integer mode: these are defined
 * in CRU_*PLL_CON0 or PMUCRU_*PLL_CON0.
 */
#define PLL_DIV_MIN	16
#define PLL_DIV_MAX	3200

/*
 * How to calculate the PLL (from TRM V0.3 Part 1 Page 63):
 * Formulas also embedded within the Fractional PLL Verilog model:
 * If DSMPD = 1 (DSM is disabled, "integer mode")
 * FOUTVCO = FREF / REFDIV * FBDIV
 * FOUTPOSTDIV = FOUTVCO / POSTDIV1 / POSTDIV2
 * Where:
 * FOUTVCO = Fractional PLL non-divided output frequency
 * FOUTPOSTDIV = Fractional PLL divided output frequency
 *               (output of second post divider)
 * FREF = Fractional PLL input reference frequency (the OSC_HZ 24MHz input)
 * REFDIV = Fractional PLL input reference clock divider
 * FBDIV = Integer value programmed into feedback divide
 *
 * A worked numeric example follows below.
 */
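/*
 * Worked example (illustrative, derived from the formulas above and the
 * apll_l_1600_cfg entry, not an additional TRM reference): for a 1600 MHz
 * target with REFDIV = 3, POSTDIV1 = POSTDIV2 = 1, PLL_DIVISORS() yields
 * FBDIV = 1600 MHz * 3 / 24 MHz = 200, so
 *	FOUTVCO     = 24 MHz / 3 * 200 = 1600 MHz
 *	FOUTPOSTDIV = 1600 MHz / 1 / 1 = 1600 MHz
 * which also satisfies the VCO_MIN/MAX_KHZ limits checked in rkclk_set_pll().
 */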
static void rkclk_set_pll(u32 *pll_con, const struct pll_div *div)
{
	/* All 8 PLLs have same VCO and output frequency range restrictions. */
	u32 vco_khz = OSC_HZ / 1000 * div->fbdiv / div->refdiv;
	u32 output_khz = vco_khz / div->postdiv1 / div->postdiv2;

	debug("PLL at %p: fbdiv=%d, refdiv=%d, postdiv1=%d, "
	      "postdiv2=%d, vco=%u khz, output=%u khz\n",
	      pll_con, div->fbdiv, div->refdiv, div->postdiv1,
	      div->postdiv2, vco_khz, output_khz);
	assert(vco_khz >= VCO_MIN_KHZ && vco_khz <= VCO_MAX_KHZ &&
	       output_khz >= OUTPUT_MIN_KHZ && output_khz <= OUTPUT_MAX_KHZ &&
	       div->fbdiv >= PLL_DIV_MIN && div->fbdiv <= PLL_DIV_MAX);

	/*
	 * When powering on or changing the PLL setting, force the PLL into
	 * slow mode to ensure a stable output clock.
	 */
	rk_clrsetreg(&pll_con[3], PLL_MODE_MASK,
		     PLL_MODE_SLOW << PLL_MODE_SHIFT);

	/* use integer mode */
	rk_clrsetreg(&pll_con[3], PLL_DSMPD_MASK,
		     PLL_INTEGER_MODE << PLL_DSMPD_SHIFT);

	rk_clrsetreg(&pll_con[0], PLL_FBDIV_MASK,
		     div->fbdiv << PLL_FBDIV_SHIFT);
	rk_clrsetreg(&pll_con[1],
		     PLL_POSTDIV2_MASK | PLL_POSTDIV1_MASK | PLL_REFDIV_MASK,
		     (div->postdiv2 << PLL_POSTDIV2_SHIFT) |
		     (div->postdiv1 << PLL_POSTDIV1_SHIFT) |
		     (div->refdiv << PLL_REFDIV_SHIFT));

	/* wait for the PLL to lock */
	while (!(readl(&pll_con[2]) & (1 << PLL_LOCK_STATUS_SHIFT)))
		udelay(1);

	/* switch the PLL back to normal mode */
	rk_clrsetreg(&pll_con[3], PLL_MODE_MASK,
		     PLL_MODE_NORM << PLL_MODE_SHIFT);
}

static int pll_para_config(u32 freq_hz, struct pll_div *div)
{
	u32 ref_khz = OSC_HZ / KHz, refdiv, fbdiv = 0;
	u32 postdiv1, postdiv2 = 1;
	u32 fref_khz;
	u32 diff_khz, best_diff_khz;
	const u32 max_refdiv = 63, max_fbdiv = 3200, min_fbdiv = 16;
	const u32 max_postdiv1 = 7, max_postdiv2 = 7;
	u32 vco_khz;
	u32 freq_khz = freq_hz / KHz;

	if (!freq_hz) {
		printf("%s: the frequency can't be 0 Hz\n", __func__);
		return -1;
	}

	postdiv1 = DIV_ROUND_UP(VCO_MIN_KHZ, freq_khz);
	if (postdiv1 > max_postdiv1) {
		postdiv2 = DIV_ROUND_UP(postdiv1, max_postdiv1);
		postdiv1 = DIV_ROUND_UP(postdiv1, postdiv2);
	}

	vco_khz = freq_khz * postdiv1 * postdiv2;

	if (vco_khz < VCO_MIN_KHZ || vco_khz > VCO_MAX_KHZ ||
	    postdiv2 > max_postdiv2) {
		printf("%s: cannot find a supported VCO for frequency %u Hz\n",
		       __func__, freq_hz);
		return -1;
	}

	div->postdiv1 = postdiv1;
	div->postdiv2 = postdiv2;

	best_diff_khz = vco_khz;
	for (refdiv = 1; refdiv < max_refdiv && best_diff_khz; refdiv++) {
		fref_khz = ref_khz / refdiv;

		fbdiv = vco_khz / fref_khz;
		if ((fbdiv >= max_fbdiv) || (fbdiv <= min_fbdiv))
			continue;
		diff_khz = vco_khz - fbdiv * fref_khz;
		if (fbdiv + 1 < max_fbdiv && diff_khz > fref_khz / 2) {
			fbdiv++;
			diff_khz = fref_khz - diff_khz;
		}

		if (diff_khz >= best_diff_khz)
			continue;

		best_diff_khz = diff_khz;
		div->refdiv = refdiv;
		div->fbdiv = fbdiv;
	}

	if (best_diff_khz > 4 * (MHz / KHz)) {
		printf("%s: failed to match output frequency %u Hz, difference of %u Hz exceeds 4 MHz\n",
		       __func__, freq_hz, best_diff_khz * KHz);
		return -1;
	}
	return 0;
}

void rk3399_configure_cpu(struct rk3399_cru *cru,
			  enum apll_l_frequencies apll_l_freq)
{
	u32 aclkm_div;
	u32 pclk_dbg_div;
	u32 atclk_div;

	rkclk_set_pll(&cru->apll_l_con[0], apll_l_cfgs[apll_l_freq]);

	aclkm_div = APLL_HZ / ACLKM_CORE_HZ - 1;
	assert((aclkm_div + 1) * ACLKM_CORE_HZ == APLL_HZ &&
	       aclkm_div < 0x1f);

	pclk_dbg_div = APLL_HZ / PCLK_DBG_HZ - 1;
	assert((pclk_dbg_div + 1) * PCLK_DBG_HZ == APLL_HZ &&
	       pclk_dbg_div < 0x1f);

	atclk_div = APLL_HZ / ATCLK_CORE_HZ - 1;
	assert((atclk_div + 1) * ATCLK_CORE_HZ == APLL_HZ &&
	       atclk_div < 0x1f);

	rk_clrsetreg(&cru->clksel_con[0],
		     ACLKM_CORE_L_DIV_CON_MASK | CLK_CORE_L_PLL_SEL_MASK |
		     CLK_CORE_L_DIV_MASK,
		     aclkm_div << ACLKM_CORE_L_DIV_CON_SHIFT |
		     CLK_CORE_L_PLL_SEL_ALPLL << CLK_CORE_L_PLL_SEL_SHIFT |
		     0 << CLK_CORE_L_DIV_SHIFT);

	rk_clrsetreg(&cru->clksel_con[1],
		     PCLK_DBG_L_DIV_MASK | ATCLK_CORE_L_DIV_MASK,
		     pclk_dbg_div << PCLK_DBG_L_DIV_SHIFT |
		     atclk_div << ATCLK_CORE_L_DIV_SHIFT);
}

#define I2C_CLK_REG_MASK(bus) \
	(I2C_DIV_CON_MASK << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT | \
	 CLK_I2C_PLL_SEL_MASK << \
	 CLK_I2C ##bus## _PLL_SEL_SHIFT)

#define I2C_CLK_REG_VALUE(bus, clk_div) \
	((clk_div - 1) << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT | \
	 CLK_I2C_PLL_SEL_GPLL << \
	 CLK_I2C ##bus## _PLL_SEL_SHIFT)

#define I2C_CLK_DIV_VALUE(con, bus) \
	(con >> CLK_I2C ##bus## _DIV_CON_SHIFT) & \
	I2C_DIV_CON_MASK;

#define I2C_PMUCLK_REG_MASK(bus) \
	(I2C_DIV_CON_MASK << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT)

#define I2C_PMUCLK_REG_VALUE(bus, clk_div) \
	((clk_div - 1) << \
	 CLK_I2C ##bus## _DIV_CON_SHIFT)
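
/*
 * Example (illustrative only): for bus 1 the token pasting above expands
 * I2C_CLK_REG_MASK(1) to
 *	(I2C_DIV_CON_MASK << CLK_I2C1_DIV_CON_SHIFT |
 *	 CLK_I2C_PLL_SEL_MASK << CLK_I2C1_PLL_SEL_SHIFT)
 * i.e. the divider field in bits [6:0] plus the parent-select bit 7 of
 * CLKSEL_CON61, which is exactly the mask passed to rk_clrsetreg() below.
 */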
static ulong rk3399_i2c_get_clk(struct rk3399_cru *cru, ulong clk_id)
{
	u32 div, con;

	switch (clk_id) {
	case SCLK_I2C1:
		con = readl(&cru->clksel_con[61]);
		div = I2C_CLK_DIV_VALUE(con, 1);
		break;
	case SCLK_I2C2:
		con = readl(&cru->clksel_con[62]);
		div = I2C_CLK_DIV_VALUE(con, 2);
		break;
	case SCLK_I2C3:
		con = readl(&cru->clksel_con[63]);
		div = I2C_CLK_DIV_VALUE(con, 3);
		break;
	case SCLK_I2C5:
		con = readl(&cru->clksel_con[61]);
		div = I2C_CLK_DIV_VALUE(con, 5);
		break;
	case SCLK_I2C6:
		con = readl(&cru->clksel_con[62]);
		div = I2C_CLK_DIV_VALUE(con, 6);
		break;
	case SCLK_I2C7:
		con = readl(&cru->clksel_con[63]);
		div = I2C_CLK_DIV_VALUE(con, 7);
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return DIV_TO_RATE(GPLL_HZ, div);
}

static ulong rk3399_i2c_set_clk(struct rk3399_cru *cru, ulong clk_id, uint hz)
{
	int src_clk_div;

	/* i2c0, 4 and 8 are sourced from ppll; i2c1, 2, 3, 5, 6, 7 from gpll */
	src_clk_div = GPLL_HZ / hz;
	assert(src_clk_div - 1 < 127);

	switch (clk_id) {
	case SCLK_I2C1:
		rk_clrsetreg(&cru->clksel_con[61], I2C_CLK_REG_MASK(1),
			     I2C_CLK_REG_VALUE(1, src_clk_div));
		break;
	case SCLK_I2C2:
		rk_clrsetreg(&cru->clksel_con[62], I2C_CLK_REG_MASK(2),
			     I2C_CLK_REG_VALUE(2, src_clk_div));
		break;
	case SCLK_I2C3:
		rk_clrsetreg(&cru->clksel_con[63], I2C_CLK_REG_MASK(3),
			     I2C_CLK_REG_VALUE(3, src_clk_div));
		break;
	case SCLK_I2C5:
		rk_clrsetreg(&cru->clksel_con[61], I2C_CLK_REG_MASK(5),
			     I2C_CLK_REG_VALUE(5, src_clk_div));
		break;
	case SCLK_I2C6:
		rk_clrsetreg(&cru->clksel_con[62], I2C_CLK_REG_MASK(6),
			     I2C_CLK_REG_VALUE(6, src_clk_div));
		break;
	case SCLK_I2C7:
		rk_clrsetreg(&cru->clksel_con[63], I2C_CLK_REG_MASK(7),
			     I2C_CLK_REG_VALUE(7, src_clk_div));
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return rk3399_i2c_get_clk(cru, clk_id);
}

/*
 * RK3399 SPI clocks have a common divider-width (7 bits) and a single bit
 * to select either CPLL or GPLL as the clock-parent. The locations within
 * the enclosing CLKSEL_CON (i.e. div_shift and sel_shift) are variable.
 */

struct spi_clkreg {
	uint8_t reg;  /* CLKSEL_CON[reg] register in CRU */
	uint8_t div_shift;
	uint8_t sel_shift;
};

/*
 * The entries are numbered relative to their offset from SCLK_SPI0.
 *
 * Note that SCLK_SPI3 (which is configured via the PMUCRU and requires
 * different logic) is not supported.
 */
static const struct spi_clkreg spi_clkregs[] = {
	[0] = { .reg = 59,
		.div_shift = CLK_SPI0_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI0_PLL_SEL_SHIFT, },
	[1] = { .reg = 59,
		.div_shift = CLK_SPI1_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI1_PLL_SEL_SHIFT, },
	[2] = { .reg = 60,
		.div_shift = CLK_SPI2_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI2_PLL_SEL_SHIFT, },
	[3] = { .reg = 60,
		.div_shift = CLK_SPI4_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI4_PLL_SEL_SHIFT, },
	[4] = { .reg = 58,
		.div_shift = CLK_SPI5_PLL_DIV_CON_SHIFT,
		.sel_shift = CLK_SPI5_PLL_SEL_SHIFT, },
};
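
/*
 * Example (derived from the table and the CLKSEL_CON58 constants above, for
 * illustration): SCLK_SPI5 maps to index 4, i.e. CLKSEL_CON58 with the 7-bit
 * divider in bits [14:8] and the GPLL/CPLL parent select in bit 15.
 */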

static ulong rk3399_spi_get_clk(struct rk3399_cru *cru, ulong clk_id)
{
	const struct spi_clkreg *spiclk = NULL;
	u32 div, val;

	switch (clk_id) {
	case SCLK_SPI0 ... SCLK_SPI5:
		spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
		break;

	default:
		pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
		return -EINVAL;
	}

	val = readl(&cru->clksel_con[spiclk->reg]);
	div = bitfield_extract(val, spiclk->div_shift,
			       CLK_SPI_PLL_DIV_CON_WIDTH);

	return DIV_TO_RATE(GPLL_HZ, div);
}

static ulong rk3399_spi_set_clk(struct rk3399_cru *cru, ulong clk_id, uint hz)
{
	const struct spi_clkreg *spiclk = NULL;
	int src_clk_div;

	src_clk_div = DIV_ROUND_UP(GPLL_HZ, hz) - 1;
	assert(src_clk_div < 128);

	switch (clk_id) {
	case SCLK_SPI1 ... SCLK_SPI5:
		spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
		break;

	default:
		pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
		return -EINVAL;
	}

	rk_clrsetreg(&cru->clksel_con[spiclk->reg],
		     ((CLK_SPI_PLL_DIV_CON_MASK << spiclk->div_shift) |
		      (CLK_SPI_PLL_SEL_GPLL << spiclk->sel_shift)),
		     ((src_clk_div << spiclk->div_shift) |
		      (CLK_SPI_PLL_SEL_GPLL << spiclk->sel_shift)));

	return rk3399_spi_get_clk(cru, clk_id);
}

static ulong rk3399_vop_set_clk(struct rk3399_cru *cru, ulong clk_id, u32 hz)
{
	struct pll_div vpll_config = {0};
	int aclk_vop = 198 * MHz;
	void *aclkreg_addr, *dclkreg_addr;
	u32 div;

	switch (clk_id) {
	case DCLK_VOP0:
		aclkreg_addr = &cru->clksel_con[47];
		dclkreg_addr = &cru->clksel_con[49];
		break;
	case DCLK_VOP1:
		aclkreg_addr = &cru->clksel_con[48];
		dclkreg_addr = &cru->clksel_con[50];
		break;
	default:
		return -EINVAL;
	}

	/* vop aclk source clk: cpll */
	div = CPLL_HZ / aclk_vop;
	assert(div - 1 < 32);

	rk_clrsetreg(aclkreg_addr,
		     ACLK_VOP_PLL_SEL_MASK | ACLK_VOP_DIV_CON_MASK,
		     ACLK_VOP_PLL_SEL_CPLL << ACLK_VOP_PLL_SEL_SHIFT |
		     (div - 1) << ACLK_VOP_DIV_CON_SHIFT);

	/* vop dclk is sourced from vpll and equals vpll (i.e. div == 1) */
	if (pll_para_config(hz, &vpll_config))
		return -1;

	rkclk_set_pll(&cru->vpll_con[0], &vpll_config);

	rk_clrsetreg(dclkreg_addr,
		     DCLK_VOP_DCLK_SEL_MASK | DCLK_VOP_PLL_SEL_MASK |
		     DCLK_VOP_DIV_CON_MASK,
		     DCLK_VOP_DCLK_SEL_DIVOUT << DCLK_VOP_DCLK_SEL_SHIFT |
		     DCLK_VOP_PLL_SEL_VPLL << DCLK_VOP_PLL_SEL_SHIFT |
		     (1 - 1) << DCLK_VOP_DIV_CON_SHIFT);

	return hz;
}

static ulong rk3399_mmc_get_clk(struct rk3399_cru *cru, uint clk_id)
{
	u32 div, con;

	switch (clk_id) {
	case HCLK_SDMMC:
	case SCLK_SDMMC:
		con = readl(&cru->clksel_con[16]);
		/* the dwmmc controller has an internal divider of 2 */
		div = 2;
		break;
	case SCLK_EMMC:
		con = readl(&cru->clksel_con[21]);
		div = 1;
		break;
	default:
		return -EINVAL;
	}

	div *= (con & CLK_EMMC_DIV_CON_MASK) >> CLK_EMMC_DIV_CON_SHIFT;
	if ((con & CLK_EMMC_PLL_MASK) >> CLK_EMMC_PLL_SHIFT
	    == CLK_EMMC_PLL_SEL_24M)
		return DIV_TO_RATE(OSC_HZ, div);
	else
		return DIV_TO_RATE(GPLL_HZ, div);
}

static ulong rk3399_mmc_set_clk(struct rk3399_cru *cru,
				ulong clk_id, ulong set_rate)
{
	int src_clk_div;
	int aclk_emmc = 198 * MHz;

	switch (clk_id) {
	case HCLK_SDMMC:
	case SCLK_SDMMC:
		/*
		 * Select clk_sdmmc source from GPLL by default.
		 * The mmc clock is divided by 2 internally, so ask the CRU
		 * for twice the requested rate.
		 */
		src_clk_div = DIV_ROUND_UP(GPLL_HZ / 2, set_rate);

		if (src_clk_div > 128) {
			/* use 24MHz source for 400KHz clock */
			src_clk_div = DIV_ROUND_UP(OSC_HZ / 2, set_rate);
			assert(src_clk_div - 1 < 128);
			rk_clrsetreg(&cru->clksel_con[16],
				     CLK_EMMC_PLL_MASK | CLK_EMMC_DIV_CON_MASK,
				     CLK_EMMC_PLL_SEL_24M << CLK_EMMC_PLL_SHIFT |
				     (src_clk_div - 1) << CLK_EMMC_DIV_CON_SHIFT);
		} else {
			rk_clrsetreg(&cru->clksel_con[16],
				     CLK_EMMC_PLL_MASK | CLK_EMMC_DIV_CON_MASK,
				     CLK_EMMC_PLL_SEL_GPLL << CLK_EMMC_PLL_SHIFT |
				     (src_clk_div - 1) << CLK_EMMC_DIV_CON_SHIFT);
		}
		break;
	case SCLK_EMMC:
		/* Select aclk_emmc source from GPLL */
		src_clk_div = DIV_ROUND_UP(GPLL_HZ, aclk_emmc);
		assert(src_clk_div - 1 < 32);

		rk_clrsetreg(&cru->clksel_con[21],
			     ACLK_EMMC_PLL_SEL_MASK | ACLK_EMMC_DIV_CON_MASK,
			     ACLK_EMMC_PLL_SEL_GPLL << ACLK_EMMC_PLL_SEL_SHIFT |
			     (src_clk_div - 1) << ACLK_EMMC_DIV_CON_SHIFT);

		/* Select clk_emmc source from GPLL too */
		src_clk_div = DIV_ROUND_UP(GPLL_HZ, set_rate);
		assert(src_clk_div - 1 < 128);

		rk_clrsetreg(&cru->clksel_con[22],
			     CLK_EMMC_PLL_MASK | CLK_EMMC_DIV_CON_MASK,
			     CLK_EMMC_PLL_SEL_GPLL << CLK_EMMC_PLL_SHIFT |
			     (src_clk_div - 1) << CLK_EMMC_DIV_CON_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	return rk3399_mmc_get_clk(cru, clk_id);
}

static ulong rk3399_gmac_set_clk(struct rk3399_cru *cru, ulong rate)
{
	ulong ret;

	/*
	 * The RGMII CLK can be derived either from an external "clkin"
	 * or generated internally by a divider from SCLK_MAC.
	 */
	if (readl(&cru->clksel_con[19]) & BIT(4)) {
		/* An external clock will always generate the right rate... */
		ret = rate;
	} else {
		/*
		 * No platform uses an internal clock to date.
		 * Implement this once it becomes necessary and print an error
		 * if someone tries to use it (while it remains unimplemented).
		 */
		pr_err("%s: internal clock is UNIMPLEMENTED\n", __func__);
		ret = 0;
	}

	return ret;
}

#define PMUSGRF_DDR_RGN_CON16 0xff330040
static ulong rk3399_ddr_set_clk(struct rk3399_cru *cru,
				ulong set_rate)
{
	struct pll_div dpll_cfg;

	/* IC ECO bug, need to set this register */
	writel(0xc000c000, PMUSGRF_DDR_RGN_CON16);

	/* clk_ddrc == DPLL = 24MHz / refdiv * fbdiv / postdiv1 / postdiv2 */
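	/*
	 * Example (sanity check of the 200 MHz entry below, illustrative
	 * only): refdiv = 1, fbdiv = 50, postdiv1 = 6, postdiv2 = 1 gives
	 * 24 MHz / 1 * 50 / 6 / 1 = 200 MHz.
	 */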
	switch (set_rate) {
	case 200 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 1, .fbdiv = 50, .postdiv1 = 6, .postdiv2 = 1};
		break;
	case 300 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 2, .fbdiv = 100, .postdiv1 = 4, .postdiv2 = 1};
		break;
	case 666 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 2, .fbdiv = 111, .postdiv1 = 2, .postdiv2 = 1};
		break;
	case 800 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 1, .fbdiv = 100, .postdiv1 = 3, .postdiv2 = 1};
		break;
	case 933 * MHz:
		dpll_cfg = (struct pll_div)
		{.refdiv = 1, .fbdiv = 116, .postdiv1 = 3, .postdiv2 = 1};
		break;
	default:
		pr_err("Unsupported SDRAM frequency %ld!\n", set_rate);
	}
	rkclk_set_pll(&cru->dpll_con[0], &dpll_cfg);

	return set_rate;
}

static ulong rk3399_saradc_get_clk(struct rk3399_cru *cru)
{
	u32 div, val;

	val = readl(&cru->clksel_con[26]);
	div = bitfield_extract(val, CLK_SARADC_DIV_CON_SHIFT,
			       CLK_SARADC_DIV_CON_WIDTH);

	return DIV_TO_RATE(OSC_HZ, div);
}

static ulong rk3399_saradc_set_clk(struct rk3399_cru *cru, uint hz)
{
	int src_clk_div;

	src_clk_div = DIV_ROUND_UP(OSC_HZ, hz) - 1;
	assert(src_clk_div < 128);

	rk_clrsetreg(&cru->clksel_con[26],
		     CLK_SARADC_DIV_CON_MASK,
		     src_clk_div << CLK_SARADC_DIV_CON_SHIFT);

	return rk3399_saradc_get_clk(cru);
}

static ulong rk3399_clk_get_rate(struct clk *clk)
{
	struct rk3399_clk_priv *priv = dev_get_priv(clk->dev);
	ulong rate = 0;

	switch (clk->id) {
	case 0 ... 63:
		return 0;
	case HCLK_SDMMC:
	case SCLK_SDMMC:
	case SCLK_EMMC:
		rate = rk3399_mmc_get_clk(priv->cru, clk->id);
		break;
	case SCLK_I2C1:
	case SCLK_I2C2:
	case SCLK_I2C3:
	case SCLK_I2C5:
	case SCLK_I2C6:
	case SCLK_I2C7:
		rate = rk3399_i2c_get_clk(priv->cru, clk->id);
		break;
	case SCLK_SPI0 ... SCLK_SPI5:
		rate = rk3399_spi_get_clk(priv->cru, clk->id);
		break;
	case SCLK_UART0:
	case SCLK_UART2:
		return 24000000;
		break;
	case PCLK_HDMI_CTRL:
		break;
	case DCLK_VOP0:
	case DCLK_VOP1:
		break;
	case PCLK_EFUSE1024NS:
		break;
	case SCLK_SARADC:
		rate = rk3399_saradc_get_clk(priv->cru);
		break;
	default:
		return -ENOENT;
	}

	return rate;
}

static ulong rk3399_clk_set_rate(struct clk *clk, ulong rate)
{
	struct rk3399_clk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	switch (clk->id) {
	case 0 ... 63:
		return 0;

	case ACLK_PERIHP:
	case HCLK_PERIHP:
	case PCLK_PERIHP:
		return 0;

	case ACLK_PERILP0:
	case HCLK_PERILP0:
	case PCLK_PERILP0:
		return 0;

	case ACLK_CCI:
		return 0;

	case HCLK_PERILP1:
	case PCLK_PERILP1:
		return 0;

	case HCLK_SDMMC:
	case SCLK_SDMMC:
	case SCLK_EMMC:
		ret = rk3399_mmc_set_clk(priv->cru, clk->id, rate);
		break;
	case SCLK_MAC:
		ret = rk3399_gmac_set_clk(priv->cru, rate);
		break;
	case SCLK_I2C1:
	case SCLK_I2C2:
	case SCLK_I2C3:
	case SCLK_I2C5:
	case SCLK_I2C6:
	case SCLK_I2C7:
		ret = rk3399_i2c_set_clk(priv->cru, clk->id, rate);
		break;
	case SCLK_SPI0 ... SCLK_SPI5:
		ret = rk3399_spi_set_clk(priv->cru, clk->id, rate);
		break;
	case PCLK_HDMI_CTRL:
	case PCLK_VIO_GRF:
		/* the PCLK gates for video are enabled by default */
		break;
	case DCLK_VOP0:
	case DCLK_VOP1:
		ret = rk3399_vop_set_clk(priv->cru, clk->id, rate);
		break;
	case SCLK_DDRCLK:
		ret = rk3399_ddr_set_clk(priv->cru, rate);
		break;
	case PCLK_EFUSE1024NS:
		break;
	case SCLK_SARADC:
		ret = rk3399_saradc_set_clk(priv->cru, rate);
		break;
	default:
		return -ENOENT;
	}

	return ret;
}

static int __maybe_unused rk3399_gmac_set_parent(struct clk *clk,
						 struct clk *parent)
{
	struct rk3399_clk_priv *priv = dev_get_priv(clk->dev);
	const char *clock_output_name;
	int ret;

	/*
	 * If the requested parent is in the same clock-controller and
	 * the id is SCLK_MAC ("clk_gmac"), switch to the internal clock.
	 */
	if ((parent->dev == clk->dev) && (parent->id == SCLK_MAC)) {
		debug("%s: switching RGMII to SCLK_MAC\n", __func__);
		rk_clrreg(&priv->cru->clksel_con[19], BIT(4));
		return 0;
	}

	/*
	 * Otherwise, we need to check the clock-output-names of the
	 * requested parent to see if the requested id is "clkin_gmac".
	 */
	ret = dev_read_string_index(parent->dev, "clock-output-names",
				    parent->id, &clock_output_name);
	if (ret < 0)
		return -ENODATA;

	/* If this is "clkin_gmac", switch to the external clock input */
	if (!strcmp(clock_output_name, "clkin_gmac")) {
		debug("%s: switching RGMII to CLKIN\n", __func__);
		rk_setreg(&priv->cru->clksel_con[19], BIT(4));
		return 0;
	}

	return -EINVAL;
}

static int __maybe_unused rk3399_clk_set_parent(struct clk *clk,
						struct clk *parent)
{
	switch (clk->id) {
	case SCLK_RMII_SRC:
		return rk3399_gmac_set_parent(clk, parent);
	}

	debug("%s: unsupported clk %ld\n", __func__, clk->id);
	return -ENOENT;
}

static int rk3399_clk_enable(struct clk *clk)
{
	switch (clk->id) {
	case HCLK_HOST0:
	case HCLK_HOST0_ARB:
	case HCLK_HOST1:
	case HCLK_HOST1_ARB:
		return 0;

	case SCLK_MAC:
	case SCLK_MAC_RX:
	case SCLK_MAC_TX:
	case SCLK_MACREF:
	case SCLK_MACREF_OUT:
	case ACLK_GMAC:
	case PCLK_GMAC:
		/* Required to successfully probe the Designware GMAC driver */
		return 0;
	}

	debug("%s: unsupported clk %ld\n", __func__, clk->id);
	return -ENOENT;
}

static struct clk_ops rk3399_clk_ops = {
	.get_rate = rk3399_clk_get_rate,
	.set_rate = rk3399_clk_set_rate,
#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
	.set_parent = rk3399_clk_set_parent,
#endif
	.enable = rk3399_clk_enable,
};

#ifdef CONFIG_SPL_BUILD
static void rkclk_init(struct rk3399_cru *cru)
{
	u32 aclk_div;
	u32 hclk_div;
	u32 pclk_div;

	rk3399_configure_cpu(cru, APLL_L_600_MHZ);
	/*
	 * Some CRU registers are changed by the bootrom; reset them back to
	 * the reset/default values described in the TRM to avoid confusing
	 * the kernel. Consider these three writes a fix for that bootrom
	 * behaviour.
	 */
	rk_clrsetreg(&cru->clksel_con[12], 0xffff, 0x4101);
	rk_clrsetreg(&cru->clksel_con[19], 0xffff, 0x033f);
	rk_clrsetreg(&cru->clksel_con[56], 0x0003, 0x0003);

	/* configure gpll cpll */
	rkclk_set_pll(&cru->gpll_con[0], &gpll_init_cfg);
	rkclk_set_pll(&cru->cpll_con[0], &cpll_init_cfg);

	/* configure perihp aclk, hclk, pclk */
	aclk_div = GPLL_HZ / PERIHP_ACLK_HZ - 1;
	assert((aclk_div + 1) * PERIHP_ACLK_HZ == GPLL_HZ && aclk_div < 0x1f);

	hclk_div = PERIHP_ACLK_HZ / PERIHP_HCLK_HZ - 1;
	assert((hclk_div + 1) * PERIHP_HCLK_HZ ==
	       PERIHP_ACLK_HZ && (hclk_div < 0x4));

	pclk_div = PERIHP_ACLK_HZ / PERIHP_PCLK_HZ - 1;
	assert((pclk_div + 1) * PERIHP_PCLK_HZ ==
	       PERIHP_ACLK_HZ && (pclk_div < 0x7));

	rk_clrsetreg(&cru->clksel_con[14],
		     PCLK_PERIHP_DIV_CON_MASK | HCLK_PERIHP_DIV_CON_MASK |
		     ACLK_PERIHP_PLL_SEL_MASK | ACLK_PERIHP_DIV_CON_MASK,
		     pclk_div << PCLK_PERIHP_DIV_CON_SHIFT |
		     hclk_div << HCLK_PERIHP_DIV_CON_SHIFT |
		     ACLK_PERIHP_PLL_SEL_GPLL << ACLK_PERIHP_PLL_SEL_SHIFT |
		     aclk_div << ACLK_PERIHP_DIV_CON_SHIFT);

	/* configure perilp0 aclk, hclk, pclk */
	aclk_div = GPLL_HZ / PERILP0_ACLK_HZ - 1;
	assert((aclk_div + 1) * PERILP0_ACLK_HZ == GPLL_HZ && aclk_div < 0x1f);

	hclk_div = PERILP0_ACLK_HZ / PERILP0_HCLK_HZ - 1;
	assert((hclk_div + 1) * PERILP0_HCLK_HZ ==
	       PERILP0_ACLK_HZ && (hclk_div < 0x4));

	pclk_div = PERILP0_ACLK_HZ / PERILP0_PCLK_HZ - 1;
	assert((pclk_div + 1) * PERILP0_PCLK_HZ ==
	       PERILP0_ACLK_HZ && (pclk_div < 0x7));

	rk_clrsetreg(&cru->clksel_con[23],
		     PCLK_PERILP0_DIV_CON_MASK | HCLK_PERILP0_DIV_CON_MASK |
		     ACLK_PERILP0_PLL_SEL_MASK | ACLK_PERILP0_DIV_CON_MASK,
		     pclk_div << PCLK_PERILP0_DIV_CON_SHIFT |
		     hclk_div << HCLK_PERILP0_DIV_CON_SHIFT |
		     ACLK_PERILP0_PLL_SEL_GPLL << ACLK_PERILP0_PLL_SEL_SHIFT |
		     aclk_div << ACLK_PERILP0_DIV_CON_SHIFT);

	/* perilp1 hclk select gpll as source */
	hclk_div = GPLL_HZ / PERILP1_HCLK_HZ - 1;
	assert((hclk_div + 1) * PERILP1_HCLK_HZ ==
	       GPLL_HZ && (hclk_div < 0x1f));

	pclk_div = PERILP1_HCLK_HZ / PERILP1_HCLK_HZ - 1;
	assert((pclk_div + 1) * PERILP1_HCLK_HZ ==
	       PERILP1_HCLK_HZ && (hclk_div < 0x7));

	rk_clrsetreg(&cru->clksel_con[25],
		     PCLK_PERILP1_DIV_CON_MASK | HCLK_PERILP1_DIV_CON_MASK |
		     HCLK_PERILP1_PLL_SEL_MASK,
		     pclk_div << PCLK_PERILP1_DIV_CON_SHIFT |
		     hclk_div << HCLK_PERILP1_DIV_CON_SHIFT |
		     HCLK_PERILP1_PLL_SEL_GPLL << HCLK_PERILP1_PLL_SEL_SHIFT);
}
#endif

static int rk3399_clk_probe(struct udevice *dev)
{
#ifdef CONFIG_SPL_BUILD
	struct rk3399_clk_priv *priv = dev_get_priv(dev);

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_clk_plat *plat = dev_get_platdata(dev);

	priv->cru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif
	rkclk_init(priv->cru);
#endif
	return 0;
}

static int rk3399_clk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_clk_priv *priv = dev_get_priv(dev);

	priv->cru = dev_read_addr_ptr(dev);
#endif
	return 0;
}

static int rk3399_clk_bind(struct udevice *dev)
{
	int ret;
	struct udevice *sys_child;
	struct sysreset_reg *priv;

	/* The reset driver does not have a device node, so bind it here */
	ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
				 &sys_child);
	if (ret) {
		debug("Warning: No sysreset driver: ret=%d\n", ret);
	} else {
		priv = malloc(sizeof(struct sysreset_reg));
		priv->glb_srst_fst_value = offsetof(struct rk3399_cru,
						    glb_srst_fst_value);
		priv->glb_srst_snd_value = offsetof(struct rk3399_cru,
						    glb_srst_snd_value);
		sys_child->priv = priv;
	}

#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
	ret = offsetof(struct rk3399_cru, softrst_con[0]);
	ret = rockchip_reset_bind(dev, ret, 21);
	if (ret)
		debug("Warning: software reset driver bind failed\n");
#endif

	return 0;
}

static const struct udevice_id rk3399_clk_ids[] = {
	{ .compatible = "rockchip,rk3399-cru" },
	{ }
};

U_BOOT_DRIVER(clk_rk3399) = {
	.name = "rockchip_rk3399_cru",
	.id = UCLASS_CLK,
	.of_match = rk3399_clk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3399_clk_priv),
	.ofdata_to_platdata = rk3399_clk_ofdata_to_platdata,
	.ops = &rk3399_clk_ops,
	.bind = rk3399_clk_bind,
	.probe = rk3399_clk_probe,
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.platdata_auto_alloc_size = sizeof(struct rk3399_clk_plat),
#endif
};

static ulong rk3399_i2c_get_pmuclk(struct rk3399_pmucru *pmucru, ulong clk_id)
{
	u32 div, con;

	switch (clk_id) {
	case SCLK_I2C0_PMU:
		con = readl(&pmucru->pmucru_clksel[2]);
		div = I2C_CLK_DIV_VALUE(con, 0);
		break;
	case SCLK_I2C4_PMU:
		con = readl(&pmucru->pmucru_clksel[3]);
		div = I2C_CLK_DIV_VALUE(con, 4);
		break;
	case SCLK_I2C8_PMU:
		con = readl(&pmucru->pmucru_clksel[2]);
		div = I2C_CLK_DIV_VALUE(con, 8);
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return DIV_TO_RATE(PPLL_HZ, div);
}

static ulong rk3399_i2c_set_pmuclk(struct rk3399_pmucru *pmucru, ulong clk_id,
				   uint hz)
{
	int src_clk_div;

	src_clk_div = PPLL_HZ / hz;
	assert(src_clk_div - 1 < 127);

	switch (clk_id) {
	case SCLK_I2C0_PMU:
		rk_clrsetreg(&pmucru->pmucru_clksel[2], I2C_PMUCLK_REG_MASK(0),
			     I2C_PMUCLK_REG_VALUE(0, src_clk_div));
		break;
	case SCLK_I2C4_PMU:
		rk_clrsetreg(&pmucru->pmucru_clksel[3], I2C_PMUCLK_REG_MASK(4),
			     I2C_PMUCLK_REG_VALUE(4, src_clk_div));
		break;
	case SCLK_I2C8_PMU:
		rk_clrsetreg(&pmucru->pmucru_clksel[2], I2C_PMUCLK_REG_MASK(8),
			     I2C_PMUCLK_REG_VALUE(8, src_clk_div));
		break;
	default:
		printf("unsupported i2c bus\n");
		return -EINVAL;
	}

	return DIV_TO_RATE(PPLL_HZ, src_clk_div);
}

static ulong rk3399_pwm_get_clk(struct rk3399_pmucru *pmucru)
{
	u32 div, con;

	/* the PWM clock rate is the same as pclk_pmu */
	con = readl(&pmucru->pmucru_clksel[0]);
	div = con & PMU_PCLK_DIV_CON_MASK;

	return DIV_TO_RATE(PPLL_HZ, div);
}

static ulong rk3399_pmuclk_get_rate(struct clk *clk)
{
	struct rk3399_pmuclk_priv *priv = dev_get_priv(clk->dev);
	ulong rate = 0;

	switch (clk->id) {
	case PLL_PPLL:
		return PPLL_HZ;
	case PCLK_RKPWM_PMU:
		rate = rk3399_pwm_get_clk(priv->pmucru);
		break;
	case SCLK_I2C0_PMU:
	case SCLK_I2C4_PMU:
	case SCLK_I2C8_PMU:
		rate = rk3399_i2c_get_pmuclk(priv->pmucru, clk->id);
		break;
	default:
		return -ENOENT;
	}

	return rate;
}

static ulong rk3399_pmuclk_set_rate(struct clk *clk, ulong rate)
{
	struct rk3399_pmuclk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	switch (clk->id) {
	case PLL_PPLL:
		/*
		 * This has already been set up and we don't want/need
		 * to change it here. Accept the request though, as the
		 * device-tree has this in an 'assigned-clocks' list.
		 */
		return PPLL_HZ;
	case SCLK_I2C0_PMU:
	case SCLK_I2C4_PMU:
	case SCLK_I2C8_PMU:
		ret = rk3399_i2c_set_pmuclk(priv->pmucru, clk->id, rate);
		break;
	default:
		return -ENOENT;
	}

	return ret;
}

static struct clk_ops rk3399_pmuclk_ops = {
	.get_rate = rk3399_pmuclk_get_rate,
	.set_rate = rk3399_pmuclk_set_rate,
};

#ifndef CONFIG_SPL_BUILD
static void pmuclk_init(struct rk3399_pmucru *pmucru)
{
	u32 pclk_div;

	/* configure pmu pll(ppll) */
	rkclk_set_pll(&pmucru->ppll_con[0], &ppll_init_cfg);

	/* configure pmu pclk */
	pclk_div = PPLL_HZ / PMU_PCLK_HZ - 1;
	rk_clrsetreg(&pmucru->pmucru_clksel[0],
		     PMU_PCLK_DIV_CON_MASK,
		     pclk_div << PMU_PCLK_DIV_CON_SHIFT);
}
#endif

static int rk3399_pmuclk_probe(struct udevice *dev)
{
#if CONFIG_IS_ENABLED(OF_PLATDATA) || !defined(CONFIG_SPL_BUILD)
	struct rk3399_pmuclk_priv *priv = dev_get_priv(dev);
#endif

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_pmuclk_plat *plat = dev_get_platdata(dev);

	priv->pmucru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif

#ifndef CONFIG_SPL_BUILD
	pmuclk_init(priv->pmucru);
#endif
	return 0;
}

static int rk3399_pmuclk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3399_pmuclk_priv *priv = dev_get_priv(dev);

	priv->pmucru = dev_read_addr_ptr(dev);
#endif
	return 0;
}

static int rk3399_pmuclk_bind(struct udevice *dev)
{
#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
	int ret;

	ret = offsetof(struct rk3399_pmucru, pmucru_softrst_con[0]);
	ret = rockchip_reset_bind(dev, ret, 2);
	if (ret)
		debug("Warning: software reset driver bind failed\n");
#endif
	return 0;
}

static const struct udevice_id rk3399_pmuclk_ids[] = {
	{ .compatible = "rockchip,rk3399-pmucru" },
	{ }
};

U_BOOT_DRIVER(rockchip_rk3399_pmuclk) = {
	.name = "rockchip_rk3399_pmucru",
	.id = UCLASS_CLK,
	.of_match = rk3399_pmuclk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3399_pmuclk_priv),
	.ofdata_to_platdata = rk3399_pmuclk_ofdata_to_platdata,
	.ops = &rk3399_pmuclk_ops,
	.probe = rk3399_pmuclk_probe,
	.bind = rk3399_pmuclk_bind,
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.platdata_auto_alloc_size = sizeof(struct rk3399_pmuclk_plat),
#endif
};