/*
 * Copyright (C) 2017 Weidmüller Interface GmbH & Co. KG
 * Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
 *
 * Copyright (C) 2013 Soren Brinkmann <soren.brinkmann@xilinx.com>
 * Copyright (C) 2013 Xilinx, Inc. All rights reserved.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dm/lists.h>
#include <errno.h>
#include <asm/io.h>
#include <asm/arch/clk.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>

/* Register bitfield defines */
#define PLLCTRL_FBDIV_MASK	0x7f000
#define PLLCTRL_FBDIV_SHIFT	12
#define PLLCTRL_BPFORCE_MASK	(1 << 4)
#define PLLCTRL_PWRDWN_MASK	2
#define PLLCTRL_PWRDWN_SHIFT	1
#define PLLCTRL_RESET_MASK	1
#define PLLCTRL_RESET_SHIFT	0

#define ZYNQ_CLK_MAXDIV		0x3f
#define CLK_CTRL_DIV1_SHIFT	20
#define CLK_CTRL_DIV1_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV1_SHIFT)
#define CLK_CTRL_DIV0_SHIFT	8
#define CLK_CTRL_DIV0_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV0_SHIFT)
#define CLK_CTRL_SRCSEL_SHIFT	4
#define CLK_CTRL_SRCSEL_MASK	(0x3 << CLK_CTRL_SRCSEL_SHIFT)

#define CLK_CTRL_DIV2X_SHIFT	26
#define CLK_CTRL_DIV2X_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV2X_SHIFT)
#define CLK_CTRL_DIV3X_SHIFT	20
#define CLK_CTRL_DIV3X_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV3X_SHIFT)

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SPL_BUILD
enum zynq_clk_rclk {mio_clk, emio_clk};
#endif

struct zynq_clk_priv {
	ulong ps_clk_freq;
#ifndef CONFIG_SPL_BUILD
	struct clk gem_emio_clk[2];
#endif
};

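/*
 * Map a logical clock ID to its SLCR control register. Clock pairs that
 * share one register (e.g. sdio0/sdio1, uart0/uart1, spi0/spi1) resolve
 * to the same address; the debug clock register doubles as the default,
 * so the switch always returns a valid pointer.
 */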
static void *zynq_clk_get_register(enum zynq_clk id)
{
	switch (id) {
	case armpll_clk:
		return &slcr_base->arm_pll_ctrl;
	case ddrpll_clk:
		return &slcr_base->ddr_pll_ctrl;
	case iopll_clk:
		return &slcr_base->io_pll_ctrl;
	case lqspi_clk:
		return &slcr_base->lqspi_clk_ctrl;
	case smc_clk:
		return &slcr_base->smc_clk_ctrl;
	case pcap_clk:
		return &slcr_base->pcap_clk_ctrl;
	case sdio0_clk ... sdio1_clk:
		return &slcr_base->sdio_clk_ctrl;
	case uart0_clk ... uart1_clk:
		return &slcr_base->uart_clk_ctrl;
	case spi0_clk ... spi1_clk:
		return &slcr_base->spi_clk_ctrl;
#ifndef CONFIG_SPL_BUILD
	case dci_clk:
		return &slcr_base->dci_clk_ctrl;
	case gem0_clk:
		return &slcr_base->gem0_clk_ctrl;
	case gem1_clk:
		return &slcr_base->gem1_clk_ctrl;
	case fclk0_clk:
		return &slcr_base->fpga0_clk_ctrl;
	case fclk1_clk:
		return &slcr_base->fpga1_clk_ctrl;
	case fclk2_clk:
		return &slcr_base->fpga2_clk_ctrl;
	case fclk3_clk:
		return &slcr_base->fpga3_clk_ctrl;
	case can0_clk ... can1_clk:
		return &slcr_base->can_clk_ctrl;
	case dbg_trc_clk ... dbg_apb_clk:
		/* fall through */
#endif
	default:
		return &slcr_base->dbg_clk_ctrl;
	}
}

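/*
 * Decode the SRCSEL mux field of a clock control register. The CPU
 * clock may be sourced from any of the three PLLs; peripheral clocks
 * default to the IO PLL. Per the Zynq-7000 TRM, SRCSEL values 0 and 1
 * both select the default source.
 */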
static enum zynq_clk zynq_clk_get_cpu_pll(u32 clk_ctrl)
{
	u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

	switch (srcsel) {
	case 2:
		return ddrpll_clk;
	case 3:
		return iopll_clk;
	case 0 ... 1:
	default:
		return armpll_clk;
	}
}

static enum zynq_clk zynq_clk_get_peripheral_pll(u32 clk_ctrl)
{
	u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

	switch (srcsel) {
	case 2:
		return armpll_clk;
	case 3:
		return ddrpll_clk;
	case 0 ... 1:
	default:
		return iopll_clk;
	}
}

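/*
 * Each PLL multiplies the PS reference clock by its feedback divider:
 * f_pll = f_ps * FBDIV. A PLL held in reset or powered down reports 0;
 * a force-bypassed PLL passes the reference through unchanged
 * (multiplier of 1).
 */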
static ulong zynq_clk_get_pll_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
	u32 clk_ctrl, reset, pwrdwn, mul, bypass;

	clk_ctrl = readl(zynq_clk_get_register(id));

	reset = (clk_ctrl & PLLCTRL_RESET_MASK) >> PLLCTRL_RESET_SHIFT;
	pwrdwn = (clk_ctrl & PLLCTRL_PWRDWN_MASK) >> PLLCTRL_PWRDWN_SHIFT;
	if (reset || pwrdwn)
		return 0;

	bypass = clk_ctrl & PLLCTRL_BPFORCE_MASK;
	if (bypass)
		mul = 1;
	else
		mul = (clk_ctrl & PLLCTRL_FBDIV_MASK) >> PLLCTRL_FBDIV_SHIFT;

	return priv->ps_clk_freq * mul;
}

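/*
 * The GEM RX clock either comes from an MIO pin (driven by the divided
 * peripheral clock) or from the EMIO interface, e.g. a PL-side clock
 * source. A non-zero SRCSEL field selects EMIO.
 */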
#ifndef CONFIG_SPL_BUILD
static enum zynq_clk_rclk zynq_clk_get_gem_rclk(enum zynq_clk id)
{
	u32 clk_ctrl, srcsel;

	if (id == gem0_clk)
		clk_ctrl = readl(&slcr_base->gem0_rclk_ctrl);
	else
		clk_ctrl = readl(&slcr_base->gem1_rclk_ctrl);

	srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;
	if (srcsel)
		return emio_clk;
	else
		return mio_clk;
}
#endif

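/*
 * The CPU clock domain runs in a fixed 6:3:2:1 or 4:2:2:1 ratio
 * (cpu_6or4x : cpu_3or2x : cpu_2x : cpu_1x), selected by the
 * CLK_621_TRUE register. The requested clock's divisor is scaled
 * accordingly from the cpu_6or4x base divider in ARM_CLK_CTRL.
 */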
static ulong zynq_clk_get_cpu_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
	u32 clk_621, clk_ctrl, div;
	enum zynq_clk pll;

	clk_ctrl = readl(&slcr_base->arm_clk_ctrl);

	div = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;

	switch (id) {
	case cpu_1x_clk:
		div *= 2;
		/* fall through */
	case cpu_2x_clk:
		clk_621 = readl(&slcr_base->clk_621_true) & 1;
		div *= 2 + clk_621;
		break;
	case cpu_3or2x_clk:
		div *= 2;
		/* fall through */
	case cpu_6or4x_clk:
		break;
	default:
		return 0;
	}

	pll = zynq_clk_get_cpu_pll(clk_ctrl);

	return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, pll), div);
}

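/*
 * DDR_CLK_CTRL carries two independent divider fields: DIV2X (bits
 * 31:26) for the ddr_2x clock and DIV3X (bits 25:20) for ddr_3x. Both
 * divide the DDR PLL output directly.
 */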
#ifndef CONFIG_SPL_BUILD
static ulong zynq_clk_get_ddr2x_rate(struct zynq_clk_priv *priv)
{
	u32 clk_ctrl, div;

	clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);

	div = (clk_ctrl & CLK_CTRL_DIV2X_MASK) >> CLK_CTRL_DIV2X_SHIFT;

	return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
}
#endif

static ulong zynq_clk_get_ddr3x_rate(struct zynq_clk_priv *priv)
{
	u32 clk_ctrl, div;

	clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);

	div = (clk_ctrl & CLK_CTRL_DIV3X_MASK) >> CLK_CTRL_DIV3X_SHIFT;

	return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
}

#ifndef CONFIG_SPL_BUILD
static ulong zynq_clk_get_dci_rate(struct zynq_clk_priv *priv)
{
	u32 clk_ctrl, div0, div1;

	clk_ctrl = readl(&slcr_base->dci_clk_ctrl);

	div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
	div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;

	return DIV_ROUND_CLOSEST(DIV_ROUND_CLOSEST(
			zynq_clk_get_pll_rate(priv, ddrpll_clk), div0), div1);
}
#endif

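/*
 * Peripheral rate = pll_rate / DIV0 [/ DIV1]. A zero divider field is
 * treated as 1, so an unprogrammed register cannot cause a division by
 * zero. Only the FPGA, CAN and GEM clocks cascade two dividers; SPL
 * never needs DIV1, so that path is compiled out there.
 */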
static ulong zynq_clk_get_peripheral_rate(struct zynq_clk_priv *priv,
					  enum zynq_clk id, bool two_divs)
{
	enum zynq_clk pll;
	u32 clk_ctrl, div0;
	u32 div1 = 1;

	clk_ctrl = readl(zynq_clk_get_register(id));

	div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
	if (!div0)
		div0 = 1;

#ifndef CONFIG_SPL_BUILD
	if (two_divs) {
		div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;
		if (!div1)
			div1 = 1;
	}
#endif

	pll = zynq_clk_get_peripheral_pll(clk_ctrl);

	return
		DIV_ROUND_CLOSEST(
			DIV_ROUND_CLOSEST(
				zynq_clk_get_pll_rate(priv, pll), div0),
			div1);
}

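/*
 * For an MIO-routed RX clock the GEM rate is just a two-divider
 * peripheral clock; for EMIO the query is delegated to the external
 * clock provider bound to the "gemN_emio_clk" handle at probe time, if
 * any.
 */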
#ifndef CONFIG_SPL_BUILD
static ulong zynq_clk_get_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
	struct clk *parent;

	if (zynq_clk_get_gem_rclk(id) == mio_clk)
		return zynq_clk_get_peripheral_rate(priv, id, true);

	parent = &priv->gem_emio_clk[id - gem0_clk];
	if (parent->dev)
		return clk_get_rate(parent);

	debug("%s: gem%d emio rx clock source unknown\n", __func__,
	      id - gem0_clk);

	return -ENOSYS;
}

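/*
 * Exhaustive search over both divider fields for the pair closest to
 * the requested rate; DIV1 is only scanned up to half the hardware
 * maximum. Worked example (numbers for illustration only): with
 * pll_rate = 1000 MHz, rate = 125 MHz is hit exactly (the scan keeps
 * the first best pair, div0 = 1, div1 = 8), while rate = 9 MHz yields
 * div0 = 37, div1 = 3, i.e. 1000 / 37 / 3 = 9.009 MHz.
 */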
static unsigned long zynq_clk_calc_peripheral_two_divs(ulong rate,
						       ulong pll_rate,
						       u32 *div0, u32 *div1)
{
	long new_err, best_err = (long)(~0UL >> 1);
	ulong new_rate, best_rate = 0;
	u32 d0, d1;

	for (d0 = 1; d0 <= ZYNQ_CLK_MAXDIV; d0++) {
		for (d1 = 1; d1 <= ZYNQ_CLK_MAXDIV >> 1; d1++) {
			new_rate = DIV_ROUND_CLOSEST(
					DIV_ROUND_CLOSEST(pll_rate, d0), d1);
			new_err = abs(new_rate - rate);

			if (new_err < best_err) {
				*div0 = d0;
				*div1 = d1;
				best_err = new_err;
				best_rate = new_rate;
			}
		}
	}

	return best_rate;
}

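/*
 * Program the divider field(s) of a peripheral clock. The SLCR is
 * write-protected, so the register update is bracketed by
 * zynq_slcr_unlock()/zynq_slcr_lock(). Returns the rate actually
 * achieved, which may differ from the request.
 */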
static ulong zynq_clk_set_peripheral_rate(struct zynq_clk_priv *priv,
					  enum zynq_clk id, ulong rate,
					  bool two_divs)
{
	enum zynq_clk pll;
	u32 clk_ctrl, div0 = 0, div1 = 0;
	ulong pll_rate, new_rate;
	u32 *reg;

	reg = zynq_clk_get_register(id);
	clk_ctrl = readl(reg);

	pll = zynq_clk_get_peripheral_pll(clk_ctrl);
	pll_rate = zynq_clk_get_pll_rate(priv, pll);
	clk_ctrl &= ~CLK_CTRL_DIV0_MASK;
	if (two_divs) {
		clk_ctrl &= ~CLK_CTRL_DIV1_MASK;
		new_rate = zynq_clk_calc_peripheral_two_divs(rate, pll_rate,
				&div0, &div1);
		clk_ctrl |= div1 << CLK_CTRL_DIV1_SHIFT;
	} else {
		div0 = DIV_ROUND_CLOSEST(pll_rate, rate);
		if (div0 > ZYNQ_CLK_MAXDIV)
			div0 = ZYNQ_CLK_MAXDIV;
		/* the achieved rate is the PLL rate over the divider */
		new_rate = DIV_ROUND_CLOSEST(pll_rate, div0);
	}
	clk_ctrl |= div0 << CLK_CTRL_DIV0_SHIFT;

	zynq_slcr_unlock();
	writel(clk_ctrl, reg);
	zynq_slcr_lock();

	return new_rate;
}

static ulong zynq_clk_set_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id,
				   ulong rate)
{
	struct clk *parent;

	if (zynq_clk_get_gem_rclk(id) == mio_clk)
		return zynq_clk_set_peripheral_rate(priv, id, rate, true);

	parent = &priv->gem_emio_clk[id - gem0_clk];
	if (parent->dev)
		return clk_set_rate(parent, rate);

	debug("%s: gem%d emio rx clock source unknown\n", __func__,
	      id - gem0_clk);

	return -ENOSYS;
}
#endif

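/*
 * clk_ops entry points. Clock IDs are grouped with case ranges so each
 * family (PLLs, CPU domain, DDR, peripherals) dispatches to the right
 * helper; dma_clk and the AHB/APB peripheral clocks reuse the cpu_2x
 * and cpu_1x rates. SPL carries a reduced, read-only variant.
 */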
#ifndef CONFIG_SPL_BUILD
static ulong zynq_clk_get_rate(struct clk *clk)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;
	bool two_divs = false;

	switch (id) {
	case armpll_clk ... iopll_clk:
		return zynq_clk_get_pll_rate(priv, id);
	case cpu_6or4x_clk ... cpu_1x_clk:
		return zynq_clk_get_cpu_rate(priv, id);
	case ddr2x_clk:
		return zynq_clk_get_ddr2x_rate(priv);
	case ddr3x_clk:
		return zynq_clk_get_ddr3x_rate(priv);
	case dci_clk:
		return zynq_clk_get_dci_rate(priv);
	case gem0_clk ... gem1_clk:
		return zynq_clk_get_gem_rate(priv, id);
	case fclk0_clk ... can1_clk:
		two_divs = true;
		/* fall through */
	case dbg_trc_clk ... dbg_apb_clk:
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
		return zynq_clk_get_peripheral_rate(priv, id, two_divs);
	case dma_clk:
		return zynq_clk_get_cpu_rate(priv, cpu_2x_clk);
	case usb0_aper_clk ... swdt_clk:
		return zynq_clk_get_cpu_rate(priv, cpu_1x_clk);
	default:
		return -ENXIO;
	}
}

static ulong zynq_clk_set_rate(struct clk *clk, ulong rate)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;
	bool two_divs = false;

	switch (id) {
	case gem0_clk ... gem1_clk:
		return zynq_clk_set_gem_rate(priv, id, rate);
	case fclk0_clk ... can1_clk:
		two_divs = true;
		/* fall through */
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
	case dbg_trc_clk ... dbg_apb_clk:
		return zynq_clk_set_peripheral_rate(priv, id, rate, two_divs);
	default:
		return -ENXIO;
	}
}
#else
static ulong zynq_clk_get_rate(struct clk *clk)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;

	switch (id) {
	case cpu_6or4x_clk ... cpu_1x_clk:
		return zynq_clk_get_cpu_rate(priv, id);
	case ddr3x_clk:
		return zynq_clk_get_ddr3x_rate(priv);
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
		return zynq_clk_get_peripheral_rate(priv, id, false);
	default:
		return -ENXIO;
	}
}
#endif

static struct clk_ops zynq_clk_ops = {
	.get_rate = zynq_clk_get_rate,
#ifndef CONFIG_SPL_BUILD
	.set_rate = zynq_clk_set_rate,
#endif
};

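/*
 * At probe, bind the optional GEM EMIO parent clocks and read the PS
 * reference frequency from the device tree, falling back to the common
 * 33.33 MHz crystal. A typical node (illustrative sketch based on the
 * xlnx,ps7-clkc binding; names and unit address may differ) looks like:
 *
 *	clkc: clkc@100 {
 *		#clock-cells = <1>;
 *		compatible = "xlnx,ps7-clkc";
 *		ps-clk-frequency = <33333333>;
 *	};
 *
 * -ENODATA from clk_get_by_name() only means the optional
 * "gemN_emio_clk" phandle is absent, which is not an error.
 */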
static int zynq_clk_probe(struct udevice *dev)
{
	struct zynq_clk_priv *priv = dev_get_priv(dev);
#ifndef CONFIG_SPL_BUILD
	unsigned int i;
	char name[16];
	int ret;

	for (i = 0; i < 2; i++) {
		sprintf(name, "gem%d_emio_clk", i);
		ret = clk_get_by_name(dev, name, &priv->gem_emio_clk[i]);
		if (ret < 0 && ret != -ENODATA) {
			dev_err(dev, "failed to get %s clock\n", name);
			return ret;
		}
	}
#endif

	priv->ps_clk_freq = fdtdec_get_uint(gd->fdt_blob, dev_of_offset(dev),
					    "ps-clk-frequency", 33333333UL);

	return 0;
}

static const struct udevice_id zynq_clk_ids[] = {
	{ .compatible = "xlnx,ps7-clkc"},
	{}
};

U_BOOT_DRIVER(zynq_clk) = {
	.name		= "zynq_clk",
	.id		= UCLASS_CLK,
	.of_match	= zynq_clk_ids,
	.flags		= DM_FLAG_PRE_RELOC,
	.ops		= &zynq_clk_ops,
	.priv_auto_alloc_size = sizeof(struct zynq_clk_priv),
	.probe		= zynq_clk_probe,
};