blob: 3661769748f20532fec6e1719b3770a995828ad4 [file] [log] [blame]
Andy Yand1dcf852017-05-15 17:49:56 +08001/*
2 * (C) Copyright 2017 Rockchip Electronics Co., Ltd
3 * Author: Andy Yan <andy.yan@rock-chips.com>
Philipp Tomsichddfe77d2017-06-22 23:47:11 +02004 * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
Andy Yand1dcf852017-05-15 17:49:56 +08005 * SPDX-License-Identifier: GPL-2.0
6 */
7
8#include <common.h>
9#include <clk-uclass.h>
10#include <dm.h>
Philipp Tomsichbee61802017-06-22 23:51:37 +020011#include <dt-structs.h>
Andy Yand1dcf852017-05-15 17:49:56 +080012#include <errno.h>
Philipp Tomsichbee61802017-06-22 23:51:37 +020013#include <mapmem.h>
Andy Yand1dcf852017-05-15 17:49:56 +080014#include <syscon.h>
David Wu615514c2017-09-20 14:37:50 +080015#include <bitfield.h>
Andy Yand1dcf852017-05-15 17:49:56 +080016#include <asm/arch/clock.h>
17#include <asm/arch/cru_rk3368.h>
18#include <asm/arch/hardware.h>
19#include <asm/io.h>
20#include <dm/lists.h>
21#include <dt-bindings/clock/rk3368-cru.h>
22
/* Needed for gd->dm_root in rk3368_clk_bind() below. */
DECLARE_GLOBAL_DATA_PTR;
24
#if CONFIG_IS_ENABLED(OF_PLATDATA)
/* Platform data when devicetree-to-C conversion (OF_PLATDATA) is in use. */
struct rk3368_clk_plat {
	struct dtd_rockchip_rk3368_cru dtd;	/* generated CRU node data */
};
#endif
30
/* PLL divider settings: Fout = ((OSC_HZ / nr) * nf) / no */
struct pll_div {
	u32 nr;		/* reference (input) divider */
	u32 nf;		/* feedback multiplier */
	u32 no;		/* output (post) divider */
};

#define OSC_HZ		(24 * 1000 * 1000)
#define APLL_L_HZ	(800 * 1000 * 1000)
#define APLL_B_HZ	(816 * 1000 * 1000)
#define GPLL_HZ		(576 * 1000 * 1000)
#define CPLL_HZ		(400 * 1000 * 1000)

/* Hardware divider fields store (divisor - 1). */
#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))

/*
 * Build a pll_div initializer for the given target rate and verify at
 * compile time that (hz, _nr, _no) can hit hz exactly with an integral
 * feedback divider.
 */
#define PLL_DIVISORS(hz, _nr, _no) { \
	.nr = _nr, .nf = (u32)((u64)hz * _nr * _no / OSC_HZ), .no = _no}; \
	_Static_assert(((u64)hz * _nr * _no / OSC_HZ) * OSC_HZ /\
		       (_nr * _no) == hz, #hz "Hz cannot be hit with PLL " \
		       "divisors on line " __stringify(__LINE__));
50
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/* Boot-time PLL configurations: APLLs always; GPLL/CPLL not in TPL. */
static const struct pll_div apll_l_init_cfg = PLL_DIVISORS(APLL_L_HZ, 12, 2);
static const struct pll_div apll_b_init_cfg = PLL_DIVISORS(APLL_B_HZ, 1, 2);
#if !defined(CONFIG_TPL_BUILD)
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 1, 2);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 6);
#endif
#endif
Andy Yand1dcf852017-05-15 17:49:56 +080059
/* Forward declaration; the definition follows further down in this file. */
static ulong rk3368_clk_get_rate(struct clk *clk);
61
Andy Yand1dcf852017-05-15 17:49:56 +080062/* Get pll rate by id */
63static uint32_t rkclk_pll_get_rate(struct rk3368_cru *cru,
64 enum rk3368_pll_id pll_id)
65{
66 uint32_t nr, no, nf;
67 uint32_t con;
68 struct rk3368_pll *pll = &cru->pll[pll_id];
69
70 con = readl(&pll->con3);
71
72 switch ((con & PLL_MODE_MASK) >> PLL_MODE_SHIFT) {
73 case PLL_MODE_SLOW:
74 return OSC_HZ;
75 case PLL_MODE_NORMAL:
76 con = readl(&pll->con0);
77 no = ((con & PLL_OD_MASK) >> PLL_OD_SHIFT) + 1;
78 nr = ((con & PLL_NR_MASK) >> PLL_NR_SHIFT) + 1;
79 con = readl(&pll->con1);
80 nf = ((con & PLL_NF_MASK) >> PLL_NF_SHIFT) + 1;
81
82 return (24 * nf / (nr * no)) * 1000000;
83 case PLL_MODE_DEEP_SLOW:
84 default:
85 return 32768;
86 }
87}
88
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/*
 * Program a PLL with the given divider settings and switch it to
 * normal mode.
 *
 * The sequence (enter slow mode + assert reset -> program dividers ->
 * delay -> release reset -> wait for lock -> normal mode) is
 * order-critical; do not reorder the register writes.
 *
 * @cru:    CRU register block
 * @pll_id: which PLL to configure
 * @div:    dividers to apply (Fout = OSC_HZ * nf / (nr * no))
 * Return: always 0
 */
static int rkclk_set_pll(struct rk3368_cru *cru, enum rk3368_pll_id pll_id,
			 const struct pll_div *div)
{
	struct rk3368_pll *pll = &cru->pll[pll_id];
	/* All PLLs have same VCO and output frequency range restrictions*/
	uint vco_hz = OSC_HZ / 1000 * div->nf / div->nr * 1000;
	uint output_hz = vco_hz / div->no;

	debug("PLL at %p: nf=%d, nr=%d, no=%d, vco=%u Hz, output=%u Hz\n",
	      pll, div->nf, div->nr, div->no, vco_hz, output_hz);

	/* enter slow mode and reset pll */
	rk_clrsetreg(&pll->con3, PLL_MODE_MASK | PLL_RESET_MASK,
		     PLL_RESET << PLL_RESET_SHIFT);

	/* divider fields are programmed as (value - 1) */
	rk_clrsetreg(&pll->con0, PLL_NR_MASK | PLL_OD_MASK,
		     ((div->nr - 1) << PLL_NR_SHIFT) |
		     ((div->no - 1) << PLL_OD_SHIFT));
	writel((div->nf - 1) << PLL_NF_SHIFT, &pll->con1);
	/*
	 * BWADJ should be set to NF / 2 to ensure the nominal bandwidth.
	 * Compare the RK3368 TRM, section "3.6.4 PLL Bandwidth Adjustment".
	 */
	clrsetbits_le32(&pll->con2, PLL_BWADJ_MASK, (div->nf >> 1) - 1);

	udelay(10);

	/* return from reset */
	rk_clrreg(&pll->con3, PLL_RESET_MASK);

	/* waiting for pll lock */
	while (!(readl(&pll->con1) & PLL_LOCK_STA))
		udelay(1);

	rk_clrsetreg(&pll->con3, PLL_MODE_MASK,
		     PLL_MODE_NORMAL << PLL_MODE_SHIFT);

	return 0;
}
#endif
Andy Yand1dcf852017-05-15 17:49:56 +0800130
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/*
 * One-time clock setup for SPL/TPL: bring up both application PLLs
 * (and, outside of TPL, GPLL and CPLL), then read back and report the
 * resulting rates via debug().
 */
static void rkclk_init(struct rk3368_cru *cru)
{
	u32 apllb, aplll, dpll, cpll, gpll;

	rkclk_set_pll(cru, APLLB, &apll_b_init_cfg);
	rkclk_set_pll(cru, APLLL, &apll_l_init_cfg);
#if !defined(CONFIG_TPL_BUILD)
	/*
	 * If we plan to return to the boot ROM, we can't increase the
	 * GPLL rate from the SPL stage.
	 */
	rkclk_set_pll(cru, GPLL, &gpll_init_cfg);
	rkclk_set_pll(cru, CPLL, &cpll_init_cfg);
#endif

	apllb = rkclk_pll_get_rate(cru, APLLB);
	aplll = rkclk_pll_get_rate(cru, APLLL);
	dpll = rkclk_pll_get_rate(cru, DPLL);
	cpll = rkclk_pll_get_rate(cru, CPLL);
	gpll = rkclk_pll_get_rate(cru, GPLL);

	debug("%s apllb(%d) apll(%d) dpll(%d) cpll(%d) gpll(%d)\n",
	      __func__, apllb, aplll, dpll, cpll, gpll);
}
#endif
Andy Yand1dcf852017-05-15 17:49:56 +0800157
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200158#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
Andy Yand1dcf852017-05-15 17:49:56 +0800159static ulong rk3368_mmc_get_clk(struct rk3368_cru *cru, uint clk_id)
160{
161 u32 div, con, con_id, rate;
162 u32 pll_rate;
163
164 switch (clk_id) {
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200165 case HCLK_SDMMC:
Andy Yand1dcf852017-05-15 17:49:56 +0800166 con_id = 50;
167 break;
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200168 case HCLK_EMMC:
Andy Yand1dcf852017-05-15 17:49:56 +0800169 con_id = 51;
170 break;
171 case SCLK_SDIO0:
172 con_id = 48;
173 break;
174 default:
175 return -EINVAL;
176 }
177
178 con = readl(&cru->clksel_con[con_id]);
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200179 switch (con & MMC_PLL_SEL_MASK) {
Andy Yand1dcf852017-05-15 17:49:56 +0800180 case MMC_PLL_SEL_GPLL:
181 pll_rate = rkclk_pll_get_rate(cru, GPLL);
182 break;
183 case MMC_PLL_SEL_24M:
184 pll_rate = OSC_HZ;
185 break;
186 case MMC_PLL_SEL_CPLL:
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200187 pll_rate = rkclk_pll_get_rate(cru, CPLL);
188 break;
Andy Yand1dcf852017-05-15 17:49:56 +0800189 case MMC_PLL_SEL_USBPHY_480M:
190 default:
191 return -EINVAL;
192 }
193 div = (con & MMC_CLK_DIV_MASK) >> MMC_CLK_DIV_SHIFT;
194 rate = DIV_TO_RATE(pll_rate, div);
195
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200196 debug("%s: raw rate %d (post-divide by 2)\n", __func__, rate);
Andy Yand1dcf852017-05-15 17:49:56 +0800197 return rate >> 1;
198}
199
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200200static ulong rk3368_mmc_find_best_rate_and_parent(struct clk *clk,
201 ulong rate,
202 u32 *best_mux,
203 u32 *best_div)
Andy Yand1dcf852017-05-15 17:49:56 +0800204{
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200205 int i;
206 ulong best_rate = 0;
207 const ulong MHz = 1000000;
208 const struct {
209 u32 mux;
210 ulong rate;
211 } parents[] = {
212 { .mux = MMC_PLL_SEL_CPLL, .rate = CPLL_HZ },
213 { .mux = MMC_PLL_SEL_GPLL, .rate = GPLL_HZ },
214 { .mux = MMC_PLL_SEL_24M, .rate = 24 * MHz }
215 };
Andy Yand1dcf852017-05-15 17:49:56 +0800216
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200217 debug("%s: target rate %ld\n", __func__, rate);
218 for (i = 0; i < ARRAY_SIZE(parents); ++i) {
219 /*
220 * Find the largest rate no larger than the target-rate for
221 * the current parent.
222 */
223 ulong parent_rate = parents[i].rate;
224 u32 div = DIV_ROUND_UP(parent_rate, rate);
225 u32 adj_div = div;
226 ulong new_rate = parent_rate / adj_div;
227
228 debug("%s: rate %ld, parent-mux %d, parent-rate %ld, div %d\n",
229 __func__, rate, parents[i].mux, parents[i].rate, div);
230
231 /* Skip, if not representable */
232 if ((div - 1) > MMC_CLK_DIV_MASK)
233 continue;
234
235 /* Skip, if we already have a better (or equal) solution */
236 if (new_rate <= best_rate)
237 continue;
238
239 /* This is our new best rate. */
240 best_rate = new_rate;
241 *best_mux = parents[i].mux;
242 *best_div = div - 1;
243 }
244
245 debug("%s: best_mux = %x, best_div = %d, best_rate = %ld\n",
246 __func__, *best_mux, *best_div, best_rate);
247
248 return best_rate;
249}
250
251static ulong rk3368_mmc_set_clk(struct clk *clk, ulong rate)
252{
253 struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
254 struct rk3368_cru *cru = priv->cru;
255 ulong clk_id = clk->id;
256 u32 con_id, mux = 0, div = 0;
257
258 /* Find the best parent and rate */
259 rk3368_mmc_find_best_rate_and_parent(clk, rate << 1, &mux, &div);
Andy Yand1dcf852017-05-15 17:49:56 +0800260
261 switch (clk_id) {
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200262 case HCLK_SDMMC:
Andy Yand1dcf852017-05-15 17:49:56 +0800263 con_id = 50;
264 break;
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200265 case HCLK_EMMC:
Andy Yand1dcf852017-05-15 17:49:56 +0800266 con_id = 51;
267 break;
268 case SCLK_SDIO0:
269 con_id = 48;
270 break;
271 default:
272 return -EINVAL;
273 }
274
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200275 rk_clrsetreg(&cru->clksel_con[con_id],
276 MMC_PLL_SEL_MASK | MMC_CLK_DIV_MASK,
277 mux | div);
Andy Yand1dcf852017-05-15 17:49:56 +0800278
279 return rk3368_mmc_get_clk(cru, clk_id);
280}
Philipp Tomsichf5a43292017-07-04 14:49:38 +0200281#endif
Andy Yand1dcf852017-05-15 17:49:56 +0800282
Philipp Tomsich62924692017-07-05 11:55:23 +0200283#if IS_ENABLED(CONFIG_TPL_BUILD)
Philipp Tomsicha00dfa02017-06-23 00:01:10 +0200284static ulong rk3368_ddr_set_clk(struct rk3368_cru *cru, ulong set_rate)
285{
286 const struct pll_div *dpll_cfg = NULL;
287 const ulong MHz = 1000000;
288
289 /* Fout = ((Fin /NR) * NF )/ NO */
Philipp Tomsich62924692017-07-05 11:55:23 +0200290 static const struct pll_div dpll_1200 = PLL_DIVISORS(1200 * MHz, 1, 1);
291 static const struct pll_div dpll_1332 = PLL_DIVISORS(1332 * MHz, 2, 1);
292 static const struct pll_div dpll_1600 = PLL_DIVISORS(1600 * MHz, 3, 2);
Philipp Tomsicha00dfa02017-06-23 00:01:10 +0200293
294 switch (set_rate) {
295 case 1200*MHz:
296 dpll_cfg = &dpll_1200;
297 break;
298 case 1332*MHz:
299 dpll_cfg = &dpll_1332;
300 break;
301 case 1600*MHz:
302 dpll_cfg = &dpll_1600;
303 break;
304 default:
Masahiro Yamada9b643e32017-09-16 14:10:41 +0900305 pr_err("Unsupported SDRAM frequency!,%ld\n", set_rate);
Philipp Tomsicha00dfa02017-06-23 00:01:10 +0200306 }
307 rkclk_set_pll(cru, DPLL, dpll_cfg);
308
309 return set_rate;
310}
Philipp Tomsich62924692017-07-05 11:55:23 +0200311#endif
Philipp Tomsicha00dfa02017-06-23 00:01:10 +0200312
#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
/*
 * Switch the GMAC clock to the external 'ext_gmac' input.
 *
 * No divider is programmed here; @set_rate is returned unchanged on the
 * assumption that the external clock already runs at the requested rate.
 */
static ulong rk3368_gmac_set_clk(struct rk3368_cru *cru,
				 ulong clk_id, ulong set_rate)
{
	/*
	 * This models the 'assigned-clock-parents = <&ext_gmac>' from
	 * the DTS and switches to the 'ext_gmac' clock parent.
	 */
	rk_setreg(&cru->clksel_con[43], GMAC_MUX_SEL_EXTCLK);
	return set_rate;
}
#endif
325
/*
 * RK3368 SPI clocks have a common divider-width (7 bits) and a single bit
 * to select either CPLL or GPLL as the clock-parent. The location within
 * the enclosing CLKSEL_CON (i.e. div_shift and sel_shift) are variable.
 */

struct spi_clkreg {
	uint8_t reg;		/* CLKSEL_CON[reg] register in CRU */
	uint8_t div_shift;	/* bit position of the 7-bit divider field */
	uint8_t sel_shift;	/* bit position of the parent-select bit */
};

/*
 * The entries are numbered relative to their offset from SCLK_SPI0.
 */
static const struct spi_clkreg spi_clkregs[] = {
	[0] = { .reg = 45, .div_shift = 0, .sel_shift = 7, },
	[1] = { .reg = 45, .div_shift = 8, .sel_shift = 15, },
	[2] = { .reg = 46, .div_shift = 8, .sel_shift = 15, },
};
346
347static inline u32 extract_bits(u32 val, unsigned width, unsigned shift)
348{
349 return (val >> shift) & ((1 << width) - 1);
350}
351
352static ulong rk3368_spi_get_clk(struct rk3368_cru *cru, ulong clk_id)
353{
354 const struct spi_clkreg *spiclk = NULL;
355 u32 div, val;
356
357 switch (clk_id) {
358 case SCLK_SPI0 ... SCLK_SPI2:
359 spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
360 break;
361
362 default:
Masahiro Yamada9b643e32017-09-16 14:10:41 +0900363 pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
Philipp Tomsichcf8aceb2017-07-25 16:48:16 +0200364 return -EINVAL;
365 }
366
367 val = readl(&cru->clksel_con[spiclk->reg]);
368 div = extract_bits(val, 7, spiclk->div_shift);
369
370 debug("%s: div 0x%x\n", __func__, div);
371 return DIV_TO_RATE(GPLL_HZ, div);
372}
373
374static ulong rk3368_spi_set_clk(struct rk3368_cru *cru, ulong clk_id, uint hz)
375{
376 const struct spi_clkreg *spiclk = NULL;
377 int src_clk_div;
378
379 src_clk_div = DIV_ROUND_UP(GPLL_HZ, hz);
380 assert(src_clk_div < 127);
381
382 switch (clk_id) {
383 case SCLK_SPI0 ... SCLK_SPI2:
384 spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
385 break;
386
387 default:
Masahiro Yamada9b643e32017-09-16 14:10:41 +0900388 pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
Philipp Tomsichcf8aceb2017-07-25 16:48:16 +0200389 return -EINVAL;
390 }
391
392 rk_clrsetreg(&cru->clksel_con[spiclk->reg],
393 ((0x7f << spiclk->div_shift) |
394 (0x1 << spiclk->sel_shift)),
395 ((src_clk_div << spiclk->div_shift) |
396 (1 << spiclk->sel_shift)));
397
398 return rk3368_spi_get_clk(cru, clk_id);
399}
400
David Wu615514c2017-09-20 14:37:50 +0800401static ulong rk3368_saradc_get_clk(struct rk3368_cru *cru)
402{
403 u32 div, val;
404
405 val = readl(&cru->clksel_con[25]);
406 div = bitfield_extract(val, CLK_SARADC_DIV_CON_SHIFT,
407 CLK_SARADC_DIV_CON_WIDTH);
408
409 return DIV_TO_RATE(OSC_HZ, div);
410}
411
412static ulong rk3368_saradc_set_clk(struct rk3368_cru *cru, uint hz)
413{
414 int src_clk_div;
415
416 src_clk_div = DIV_ROUND_UP(OSC_HZ, hz) - 1;
417 assert(src_clk_div < 128);
418
419 rk_clrsetreg(&cru->clksel_con[25],
420 CLK_SARADC_DIV_CON_MASK,
421 src_clk_div << CLK_SARADC_DIV_CON_SHIFT);
422
423 return rk3368_saradc_get_clk(cru);
424}
425
Philipp Tomsichcf8aceb2017-07-25 16:48:16 +0200426static ulong rk3368_clk_get_rate(struct clk *clk)
427{
428 struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
429 ulong rate = 0;
430
431 debug("%s: id %ld\n", __func__, clk->id);
432 switch (clk->id) {
433 case PLL_CPLL:
434 rate = rkclk_pll_get_rate(priv->cru, CPLL);
435 break;
436 case PLL_GPLL:
437 rate = rkclk_pll_get_rate(priv->cru, GPLL);
438 break;
439 case SCLK_SPI0 ... SCLK_SPI2:
440 rate = rk3368_spi_get_clk(priv->cru, clk->id);
441 break;
442#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
443 case HCLK_SDMMC:
444 case HCLK_EMMC:
445 rate = rk3368_mmc_get_clk(priv->cru, clk->id);
446 break;
447#endif
David Wu615514c2017-09-20 14:37:50 +0800448 case SCLK_SARADC:
449 rate = rk3368_saradc_get_clk(priv->cru);
450 break;
Philipp Tomsichcf8aceb2017-07-25 16:48:16 +0200451 default:
452 return -ENOENT;
453 }
454
455 return rate;
456}
457
/*
 * clk_ops .set_rate callback: dispatch to the per-peripheral set_clk
 * helper for the given clk->id. Returns the achieved rate (helpers
 * read it back where possible) or -ENOENT for unhandled IDs.
 */
static ulong rk3368_clk_set_rate(struct clk *clk, ulong rate)
{
	__maybe_unused struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	debug("%s id:%ld rate:%ld\n", __func__, clk->id, rate);
	switch (clk->id) {
	case SCLK_SPI0 ... SCLK_SPI2:
		ret = rk3368_spi_set_clk(priv->cru, clk->id, rate);
		break;
/* DDR reclocking only exists in the TPL build */
#if IS_ENABLED(CONFIG_TPL_BUILD)
	case CLK_DDR:
		ret = rk3368_ddr_set_clk(priv->cru, rate);
		break;
#endif
#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
	case HCLK_SDMMC:
	case HCLK_EMMC:
		ret = rk3368_mmc_set_clk(clk, rate);
		break;
#endif
#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
	case SCLK_MAC:
		/* select the external clock */
		ret = rk3368_gmac_set_clk(priv->cru, clk->id, rate);
		break;
#endif
	case SCLK_SARADC:
		ret = rk3368_saradc_set_clk(priv->cru, rate);
		break;
	default:
		return -ENOENT;
	}

	return ret;
}
494
/* Operations exposed through the CLK uclass. */
static struct clk_ops rk3368_clk_ops = {
	.get_rate = rk3368_clk_get_rate,
	.set_rate = rk3368_clk_set_rate,
};
499
/*
 * Probe: resolve the CRU register base (from OF_PLATDATA, when enabled;
 * otherwise it was already set in ofdata_to_platdata) and run the
 * one-time PLL initialization in SPL/TPL builds.
 */
static int rk3368_clk_probe(struct udevice *dev)
{
	struct rk3368_clk_priv __maybe_unused *priv = dev_get_priv(dev);
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_plat *plat = dev_get_platdata(dev);

	/* reg[0]/reg[1] are the node's base address and size */
	priv->cru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
	rkclk_init(priv->cru);
#endif

	return 0;
}
514
/*
 * Read the CRU register base from the devicetree (skipped when
 * OF_PLATDATA provides it via the generated platform data instead).
 */
static int rk3368_clk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_priv *priv = dev_get_priv(dev);

	priv->cru = dev_read_addr_ptr(dev);
#endif

	return 0;
}
525
526static int rk3368_clk_bind(struct udevice *dev)
527{
528 int ret;
529
530 /* The reset driver does not have a device node, so bind it here */
531 ret = device_bind_driver(gd->dm_root, "rk3368_sysreset", "reset", &dev);
532 if (ret)
Masahiro Yamada9b643e32017-09-16 14:10:41 +0900533 pr_err("bind RK3368 reset driver failed: ret=%d\n", ret);
Andy Yand1dcf852017-05-15 17:49:56 +0800534
535 return ret;
536}
537
/* Devicetree match table for the RK3368 clock & reset unit. */
static const struct udevice_id rk3368_clk_ids[] = {
	{ .compatible = "rockchip,rk3368-cru" },
	{ }
};

U_BOOT_DRIVER(rockchip_rk3368_cru) = {
	.name = "rockchip_rk3368_cru",
	.id = UCLASS_CLK,
	.of_match = rk3368_clk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3368_clk_priv),
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.platdata_auto_alloc_size = sizeof(struct rk3368_clk_plat),
#endif
	.ofdata_to_platdata = rk3368_clk_ofdata_to_platdata,
	.ops = &rk3368_clk_ops,
	.bind = rk3368_clk_bind,
	.probe = rk3368_clk_probe,
};