blob: c52537cedf3aa8b6a4c1920d7119ee50c5003720 [file] [log] [blame]
Ryder Lee0bd7dc72018-11-15 10:07:54 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * MediaTek common clock driver
4 *
5 * Copyright (C) 2018 MediaTek Inc.
6 * Author: Ryder Lee <ryder.lee@mediatek.com>
7 */
8
9#include <common.h>
10#include <clk-uclass.h>
11#include <div64.h>
12#include <dm.h>
13#include <asm/io.h>
14
15#include "clk-mtk.h"
16
17#define REG_CON0 0
18#define REG_CON1 4
19
20#define CON0_BASE_EN BIT(0)
21#define CON0_PWR_ON BIT(0)
22#define CON0_ISO_EN BIT(1)
23#define CON1_PCW_CHG BIT(31)
24
25#define POSTDIV_MASK 0x7
26#define INTEGER_BITS 7
27
28/* scpsys clock off control */
29#define CLK_SCP_CFG0 0x200
30#define CLK_SCP_CFG1 0x204
31#define SCP_ARMCK_OFF_EN GENMASK(9, 0)
32#define SCP_AXICK_DCM_DIS_EN BIT(0)
33#define SCP_AXICK_26M_SEL_EN BIT(4)
34
35/* shared functions */
36
37/*
38 * In case the rate change propagation to parent clocks is undesirable,
39 * this function is recursively called to find the parent to calculate
40 * the accurate frequency.
41 */
Sam Shihd8588ba2020-01-10 16:30:30 +080042static ulong mtk_clk_find_parent_rate(struct clk *clk, int id,
Fabien Parent832685f2019-10-17 21:02:05 +020043 const struct driver *drv)
Ryder Lee0bd7dc72018-11-15 10:07:54 +080044{
45 struct clk parent = { .id = id, };
46
47 if (drv) {
48 struct udevice *dev;
49
50 if (uclass_get_device_by_driver(UCLASS_CLK, drv, &dev))
51 return -ENODEV;
52
53 parent.dev = dev;
54 } else {
55 parent.dev = clk->dev;
56 }
57
58 return clk_get_rate(&parent);
59}
60
61static int mtk_clk_mux_set_parent(void __iomem *base, u32 parent,
62 const struct mtk_composite *mux)
63{
64 u32 val, index = 0;
65
66 while (mux->parent[index] != parent)
67 if (++index == mux->num_parents)
68 return -EINVAL;
69
mingming leef62168d2019-12-31 11:29:21 +080070 if (mux->flags & CLK_MUX_SETCLR_UPD) {
71 val = (mux->mux_mask << mux->mux_shift);
72 writel(val, base + mux->mux_clr_reg);
Ryder Lee0bd7dc72018-11-15 10:07:54 +080073
mingming leef62168d2019-12-31 11:29:21 +080074 val = (index << mux->mux_shift);
75 writel(val, base + mux->mux_set_reg);
76
77 if (mux->upd_shift >= 0)
78 writel(BIT(mux->upd_shift), base + mux->upd_reg);
79 } else {
80 /* switch mux to a select parent */
81 val = readl(base + mux->mux_reg);
82 val &= ~(mux->mux_mask << mux->mux_shift);
83
84 val |= index << mux->mux_shift;
85 writel(val, base + mux->mux_reg);
86 }
Ryder Lee0bd7dc72018-11-15 10:07:54 +080087
88 return 0;
89}
90
91/* apmixedsys functions */
92
93static unsigned long __mtk_pll_recalc_rate(const struct mtk_pll_data *pll,
94 u32 fin, u32 pcw, int postdiv)
95{
96 int pcwbits = pll->pcwbits;
97 int pcwfbits;
mingming lee0670adb2019-12-31 11:29:22 +080098 int ibits;
Ryder Lee0bd7dc72018-11-15 10:07:54 +080099 u64 vco;
100 u8 c = 0;
101
102 /* The fractional part of the PLL divider. */
mingming lee0670adb2019-12-31 11:29:22 +0800103 ibits = pll->pcwibits ? pll->pcwibits : INTEGER_BITS;
104 pcwfbits = pcwbits > ibits ? pcwbits - ibits : 0;
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800105
106 vco = (u64)fin * pcw;
107
108 if (pcwfbits && (vco & GENMASK(pcwfbits - 1, 0)))
109 c = 1;
110
111 vco >>= pcwfbits;
112
113 if (c)
114 vco++;
115
116 return ((unsigned long)vco + postdiv - 1) / postdiv;
117}
118
/**
 * mtk_pll_set_rate_regs - program the pcw and post divider of a PLL
 * @clk:	The PLL clk
 * @pcw:	Feedback divider value (integer plus fractional bits)
 * @postdiv:	Power-of-two post divider
 *
 * MediaTek PLLs are configured through their pcw value. The pcw value
 * describes a divider in the PLL feedback loop which consists of 7 bits
 * for the integer part and the remaining bits (if present) for the
 * fractional part. Also they have a 3 bit power-of-two post divider.
 */
static void mtk_pll_set_rate_regs(struct clk *clk, u32 pcw, int postdiv)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 val, chg;

	/* set postdiv: the register field holds log2(postdiv) */
	val = readl(priv->base + pll->pd_reg);
	val &= ~(POSTDIV_MASK << pll->pd_shift);
	val |= (ffs(postdiv) - 1) << pll->pd_shift;

	/* postdiv and pcw need to set at the same time if on same register */
	if (pll->pd_reg != pll->pcw_reg) {
		writel(val, priv->base + pll->pd_reg);
		val = readl(priv->base + pll->pcw_reg);
	}

	/* set pcw */
	val &= ~GENMASK(pll->pcw_shift + pll->pcwbits - 1, pll->pcw_shift);
	val |= pcw << pll->pcw_shift;

	if (pll->pcw_chg_reg) {
		/* latch the new pcw via the dedicated change-trigger register */
		chg = readl(priv->base + pll->pcw_chg_reg);
		chg |= CON1_PCW_CHG;
		writel(val, priv->base + pll->pcw_reg);
		writel(chg, priv->base + pll->pcw_chg_reg);
	} else {
		/* no separate trigger: the change bit lives in CON1 itself */
		val |= CON1_PCW_CHG;
		writel(val, priv->base + pll->pcw_reg);
	}

	/* give the PLL time to settle on the new frequency */
	udelay(20);
}
158
159/**
160 * mtk_pll_calc_values - calculate good values for a given input frequency.
161 * @clk: The clk
162 * @pcw: The pcw value (output)
163 * @postdiv: The post divider (output)
164 * @freq: The desired target frequency
165 */
166static void mtk_pll_calc_values(struct clk *clk, u32 *pcw, u32 *postdiv,
167 u32 freq)
168{
169 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
170 const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
mingming lee0670adb2019-12-31 11:29:22 +0800171 unsigned long fmin = pll->fmin ? pll->fmin : 1000 * MHZ;
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800172 u64 _pcw;
mingming lee0670adb2019-12-31 11:29:22 +0800173 int ibits;
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800174 u32 val;
175
176 if (freq > pll->fmax)
177 freq = pll->fmax;
178
179 for (val = 0; val < 5; val++) {
180 *postdiv = 1 << val;
181 if ((u64)freq * *postdiv >= fmin)
182 break;
183 }
184
185 /* _pcw = freq * postdiv / xtal_rate * 2^pcwfbits */
mingming lee0670adb2019-12-31 11:29:22 +0800186 ibits = pll->pcwibits ? pll->pcwibits : INTEGER_BITS;
187 _pcw = ((u64)freq << val) << (pll->pcwbits - ibits);
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800188 do_div(_pcw, priv->tree->xtal2_rate);
189
190 *pcw = (u32)_pcw;
191}
192
193static ulong mtk_apmixedsys_set_rate(struct clk *clk, ulong rate)
194{
195 u32 pcw = 0;
196 u32 postdiv;
197
198 mtk_pll_calc_values(clk, &pcw, &postdiv, rate);
199 mtk_pll_set_rate_regs(clk, pcw, postdiv);
200
201 return 0;
202}
203
204static ulong mtk_apmixedsys_get_rate(struct clk *clk)
205{
206 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
207 const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
208 u32 postdiv;
209 u32 pcw;
210
211 postdiv = (readl(priv->base + pll->pd_reg) >> pll->pd_shift) &
212 POSTDIV_MASK;
213 postdiv = 1 << postdiv;
214
215 pcw = readl(priv->base + pll->pcw_reg) >> pll->pcw_shift;
216 pcw &= GENMASK(pll->pcwbits - 1, 0);
217
218 return __mtk_pll_recalc_rate(pll, priv->tree->xtal2_rate,
219 pcw, postdiv);
220}
221
222static int mtk_apmixedsys_enable(struct clk *clk)
223{
224 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
225 const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
226 u32 r;
227
228 r = readl(priv->base + pll->pwr_reg) | CON0_PWR_ON;
229 writel(r, priv->base + pll->pwr_reg);
230 udelay(1);
231
232 r = readl(priv->base + pll->pwr_reg) & ~CON0_ISO_EN;
233 writel(r, priv->base + pll->pwr_reg);
234 udelay(1);
235
236 r = readl(priv->base + pll->reg + REG_CON0);
237 r |= pll->en_mask;
238 writel(r, priv->base + pll->reg + REG_CON0);
239
240 udelay(20);
241
242 if (pll->flags & HAVE_RST_BAR) {
243 r = readl(priv->base + pll->reg + REG_CON0);
244 r |= pll->rst_bar_mask;
245 writel(r, priv->base + pll->reg + REG_CON0);
246 }
247
248 return 0;
249}
250
251static int mtk_apmixedsys_disable(struct clk *clk)
252{
253 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
254 const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
255 u32 r;
256
257 if (pll->flags & HAVE_RST_BAR) {
258 r = readl(priv->base + pll->reg + REG_CON0);
259 r &= ~pll->rst_bar_mask;
260 writel(r, priv->base + pll->reg + REG_CON0);
261 }
262
263 r = readl(priv->base + pll->reg + REG_CON0);
264 r &= ~CON0_BASE_EN;
265 writel(r, priv->base + pll->reg + REG_CON0);
266
267 r = readl(priv->base + pll->pwr_reg) | CON0_ISO_EN;
268 writel(r, priv->base + pll->pwr_reg);
269
270 r = readl(priv->base + pll->pwr_reg) & ~CON0_PWR_ON;
271 writel(r, priv->base + pll->pwr_reg);
272
273 return 0;
274}
275
276/* topckgen functions */
277
278static ulong mtk_factor_recalc_rate(const struct mtk_fixed_factor *fdiv,
279 ulong parent_rate)
280{
281 u64 rate = parent_rate * fdiv->mult;
282
283 do_div(rate, fdiv->div);
284
285 return rate;
286}
287
Sam Shihd8588ba2020-01-10 16:30:30 +0800288static ulong mtk_topckgen_get_factor_rate(struct clk *clk, u32 off)
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800289{
290 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
291 const struct mtk_fixed_factor *fdiv = &priv->tree->fdivs[off];
292 ulong rate;
293
294 switch (fdiv->flags & CLK_PARENT_MASK) {
295 case CLK_PARENT_APMIXED:
296 rate = mtk_clk_find_parent_rate(clk, fdiv->parent,
297 DM_GET_DRIVER(mtk_clk_apmixedsys));
298 break;
299 case CLK_PARENT_TOPCKGEN:
300 rate = mtk_clk_find_parent_rate(clk, fdiv->parent, NULL);
301 break;
302
303 default:
304 rate = priv->tree->xtal_rate;
305 }
306
307 return mtk_factor_recalc_rate(fdiv, rate);
308}
309
Sam Shihd8588ba2020-01-10 16:30:30 +0800310static ulong mtk_topckgen_get_mux_rate(struct clk *clk, u32 off)
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800311{
312 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
313 const struct mtk_composite *mux = &priv->tree->muxes[off];
314 u32 index;
315
316 index = readl(priv->base + mux->mux_reg);
317 index &= mux->mux_mask << mux->mux_shift;
318 index = index >> mux->mux_shift;
319
320 if (mux->parent[index])
321 return mtk_clk_find_parent_rate(clk, mux->parent[index],
322 NULL);
323
324 return priv->tree->xtal_rate;
325}
326
327static ulong mtk_topckgen_get_rate(struct clk *clk)
328{
329 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
330
331 if (clk->id < priv->tree->fdivs_offs)
332 return priv->tree->fclks[clk->id].rate;
333 else if (clk->id < priv->tree->muxes_offs)
334 return mtk_topckgen_get_factor_rate(clk, clk->id -
335 priv->tree->fdivs_offs);
336 else
337 return mtk_topckgen_get_mux_rate(clk, clk->id -
338 priv->tree->muxes_offs);
339}
340
341static int mtk_topckgen_enable(struct clk *clk)
342{
343 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
344 const struct mtk_composite *mux;
345 u32 val;
346
347 if (clk->id < priv->tree->muxes_offs)
348 return 0;
349
350 mux = &priv->tree->muxes[clk->id - priv->tree->muxes_offs];
351 if (mux->gate_shift < 0)
352 return 0;
353
354 /* enable clock gate */
mingming leef62168d2019-12-31 11:29:21 +0800355 if (mux->flags & CLK_MUX_SETCLR_UPD) {
356 val = BIT(mux->gate_shift);
357 writel(val, priv->base + mux->mux_clr_reg);
358 } else {
359 val = readl(priv->base + mux->gate_reg);
360 val &= ~BIT(mux->gate_shift);
361 writel(val, priv->base + mux->gate_reg);
362 }
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800363
364 if (mux->flags & CLK_DOMAIN_SCPSYS) {
365 /* enable scpsys clock off control */
366 writel(SCP_ARMCK_OFF_EN, priv->base + CLK_SCP_CFG0);
367 writel(SCP_AXICK_DCM_DIS_EN | SCP_AXICK_26M_SEL_EN,
368 priv->base + CLK_SCP_CFG1);
369 }
370
371 return 0;
372}
373
374static int mtk_topckgen_disable(struct clk *clk)
375{
376 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
377 const struct mtk_composite *mux;
378 u32 val;
379
380 if (clk->id < priv->tree->muxes_offs)
381 return 0;
382
383 mux = &priv->tree->muxes[clk->id - priv->tree->muxes_offs];
384 if (mux->gate_shift < 0)
385 return 0;
386
387 /* disable clock gate */
mingming leef62168d2019-12-31 11:29:21 +0800388 if (mux->flags & CLK_MUX_SETCLR_UPD) {
389 val = BIT(mux->gate_shift);
390 writel(val, priv->base + mux->mux_set_reg);
391 } else {
392 val = readl(priv->base + mux->gate_reg);
393 val |= BIT(mux->gate_shift);
394 writel(val, priv->base + mux->gate_reg);
395 }
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800396
397 return 0;
398}
399
400static int mtk_topckgen_set_parent(struct clk *clk, struct clk *parent)
401{
402 struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
403
404 if (clk->id < priv->tree->muxes_offs)
405 return 0;
406
407 return mtk_clk_mux_set_parent(priv->base, parent->id,
408 &priv->tree->muxes[clk->id - priv->tree->muxes_offs]);
409}
410
411/* CG functions */
412
413static int mtk_clk_gate_enable(struct clk *clk)
414{
415 struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
416 const struct mtk_gate *gate = &priv->gates[clk->id];
417 u32 bit = BIT(gate->shift);
418
419 switch (gate->flags & CLK_GATE_MASK) {
420 case CLK_GATE_SETCLR:
421 writel(bit, priv->base + gate->regs->clr_ofs);
422 break;
Fabien Parentfe913a82019-03-24 16:46:35 +0100423 case CLK_GATE_SETCLR_INV:
424 writel(bit, priv->base + gate->regs->set_ofs);
425 break;
426 case CLK_GATE_NO_SETCLR:
427 clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, 0);
428 break;
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800429 case CLK_GATE_NO_SETCLR_INV:
430 clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, bit);
431 break;
432
433 default:
434 return -EINVAL;
435 }
436
437 return 0;
438}
439
440static int mtk_clk_gate_disable(struct clk *clk)
441{
442 struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
443 const struct mtk_gate *gate = &priv->gates[clk->id];
444 u32 bit = BIT(gate->shift);
445
446 switch (gate->flags & CLK_GATE_MASK) {
447 case CLK_GATE_SETCLR:
448 writel(bit, priv->base + gate->regs->set_ofs);
449 break;
Fabien Parentfe913a82019-03-24 16:46:35 +0100450 case CLK_GATE_SETCLR_INV:
451 writel(bit, priv->base + gate->regs->clr_ofs);
452 break;
453 case CLK_GATE_NO_SETCLR:
454 clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, bit);
455 break;
Ryder Lee0bd7dc72018-11-15 10:07:54 +0800456 case CLK_GATE_NO_SETCLR_INV:
457 clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, 0);
458 break;
459
460 default:
461 return -EINVAL;
462 }
463
464 return 0;
465}
466
467static ulong mtk_clk_gate_get_rate(struct clk *clk)
468{
469 struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
470 const struct mtk_gate *gate = &priv->gates[clk->id];
471
472 switch (gate->flags & CLK_PARENT_MASK) {
473 case CLK_PARENT_APMIXED:
474 return mtk_clk_find_parent_rate(clk, gate->parent,
475 DM_GET_DRIVER(mtk_clk_apmixedsys));
476 break;
477 case CLK_PARENT_TOPCKGEN:
478 return mtk_clk_find_parent_rate(clk, gate->parent,
479 DM_GET_DRIVER(mtk_clk_topckgen));
480 break;
481
482 default:
483 return priv->tree->xtal_rate;
484 }
485}
486
/* clk_ops for the apmixedsys (PLL) controller */
const struct clk_ops mtk_clk_apmixedsys_ops = {
	.enable = mtk_apmixedsys_enable,
	.disable = mtk_apmixedsys_disable,
	.set_rate = mtk_apmixedsys_set_rate,
	.get_rate = mtk_apmixedsys_get_rate,
};
493
/* clk_ops for the topckgen (fixed clocks, dividers and muxes) controller */
const struct clk_ops mtk_clk_topckgen_ops = {
	.enable = mtk_topckgen_enable,
	.disable = mtk_topckgen_disable,
	.get_rate = mtk_topckgen_get_rate,
	.set_parent = mtk_topckgen_set_parent,
};
500
/* clk_ops for simple clock-gate controllers (infracfg, pericfg, ...) */
const struct clk_ops mtk_clk_gate_ops = {
	.enable = mtk_clk_gate_enable,
	.disable = mtk_clk_gate_disable,
	.get_rate = mtk_clk_gate_get_rate,
};
506
507int mtk_common_clk_init(struct udevice *dev,
508 const struct mtk_clk_tree *tree)
509{
510 struct mtk_clk_priv *priv = dev_get_priv(dev);
511
512 priv->base = dev_read_addr_ptr(dev);
513 if (!priv->base)
514 return -ENOENT;
515
516 priv->tree = tree;
517
518 return 0;
519}
520
521int mtk_common_clk_gate_init(struct udevice *dev,
522 const struct mtk_clk_tree *tree,
523 const struct mtk_gate *gates)
524{
525 struct mtk_cg_priv *priv = dev_get_priv(dev);
526
527 priv->base = dev_read_addr_ptr(dev);
528 if (!priv->base)
529 return -ENOENT;
530
531 priv->tree = tree;
532 priv->gates = gates;
533
534 return 0;
535}