// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek common clock driver
 *
 * Copyright (C) 2018 MediaTek Inc.
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 */

#include <common.h>
#include <clk-uclass.h>
#include <div64.h>
#include <dm.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>

#include "clk-mtk.h"

#define REG_CON0		0
#define REG_CON1		4

#define CON0_BASE_EN		BIT(0)
#define CON0_PWR_ON		BIT(0)
#define CON0_ISO_EN		BIT(1)
#define CON1_PCW_CHG		BIT(31)

#define POSTDIV_MASK		0x7
#define INTEGER_BITS		7

/* scpsys clock off control */
#define CLK_SCP_CFG0		0x200
#define CLK_SCP_CFG1		0x204
#define SCP_ARMCK_OFF_EN	GENMASK(9, 0)
#define SCP_AXICK_DCM_DIS_EN	BIT(0)
#define SCP_AXICK_26M_SEL_EN	BIT(4)

/* shared functions */

/*
 * In case rate-change propagation to the parent clocks is undesirable,
 * this function is called recursively to find the parent clock and
 * calculate the accurate frequency.
 */
static ulong mtk_clk_find_parent_rate(struct clk *clk, int id,
				      struct udevice *pdev)
{
	struct clk parent = { .id = id, };

	if (pdev)
		parent.dev = pdev;
	else
		parent.dev = clk->dev;

	return clk_get_rate(&parent);
}

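/*
 * Reparent a composite mux: look up the index of @parent in the mux's
 * parent table, then program it either through the set/clr/upd register
 * triplet (CLK_MUX_SETCLR_UPD) or by a read-modify-write of the mux
 * register.
 */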
static int mtk_clk_mux_set_parent(void __iomem *base, u32 parent,
				  const struct mtk_composite *mux)
{
	u32 val, index = 0;

	while (mux->parent[index] != parent)
		if (++index == mux->num_parents)
			return -EINVAL;

	if (mux->flags & CLK_MUX_SETCLR_UPD) {
		val = (mux->mux_mask << mux->mux_shift);
		writel(val, base + mux->mux_clr_reg);

		val = (index << mux->mux_shift);
		writel(val, base + mux->mux_set_reg);

		if (mux->upd_shift >= 0)
			writel(BIT(mux->upd_shift), base + mux->upd_reg);
	} else {
		/* switch mux to the selected parent */
		val = readl(base + mux->mux_reg);
		val &= ~(mux->mux_mask << mux->mux_shift);

		val |= index << mux->mux_shift;
		writel(val, base + mux->mux_reg);
	}

	return 0;
}

/* apmixedsys functions */

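/*
 * Recalculate the PLL output rate from the raw register values:
 * rate = (fin * pcw / 2^pcwfbits) / postdiv, where pcwfbits is the
 * number of fractional bits in pcw. Both divisions round up.
 */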
static unsigned long __mtk_pll_recalc_rate(const struct mtk_pll_data *pll,
					   u32 fin, u32 pcw, int postdiv)
{
	int pcwbits = pll->pcwbits;
	int pcwfbits;
	int ibits;
	u64 vco;
	u8 c = 0;

	/* The fractional part of the PLL divider. */
	ibits = pll->pcwibits ? pll->pcwibits : INTEGER_BITS;
	pcwfbits = pcwbits > ibits ? pcwbits - ibits : 0;

	vco = (u64)fin * pcw;

	if (pcwfbits && (vco & GENMASK(pcwfbits - 1, 0)))
		c = 1;

	vco >>= pcwfbits;

	if (c)
		vco++;

	return ((unsigned long)vco + postdiv - 1) / postdiv;
}

/**
 * MediaTek PLLs are configured through their pcw value. The pcw value
 * describes a divider in the PLL feedback loop which consists of 7 bits
 * for the integer part and the remaining bits (if present) for the
 * fractional part. They also have a 3-bit power-of-two post divider.
 */
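/*
 * Illustrative example (values are hypothetical, not taken from any
 * datasheet): with fin = 26 MHz, pcwbits = 22 and pcwibits = 7 there are
 * 15 fractional bits, so pcw = 0x1C0000 encodes a feedback divider of
 * 0x1C0000 / 2^15 = 56; the VCO then runs at 26 MHz * 56 = 1456 MHz and
 * postdiv = 2 yields a 728 MHz output.
 */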
static void mtk_pll_set_rate_regs(struct clk *clk, u32 pcw, int postdiv)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 val, chg;

	/* set postdiv */
	val = readl(priv->base + pll->pd_reg);
	val &= ~(POSTDIV_MASK << pll->pd_shift);
	val |= (ffs(postdiv) - 1) << pll->pd_shift;

	/* postdiv and pcw need to be set at the same time if on the same register */
	if (pll->pd_reg != pll->pcw_reg) {
		writel(val, priv->base + pll->pd_reg);
		val = readl(priv->base + pll->pcw_reg);
	}

	/* set pcw */
	val &= ~GENMASK(pll->pcw_shift + pll->pcwbits - 1, pll->pcw_shift);
	val |= pcw << pll->pcw_shift;

	if (pll->pcw_chg_reg) {
		chg = readl(priv->base + pll->pcw_chg_reg);
		chg |= CON1_PCW_CHG;
		writel(val, priv->base + pll->pcw_reg);
		writel(chg, priv->base + pll->pcw_chg_reg);
	} else {
		val |= CON1_PCW_CHG;
		writel(val, priv->base + pll->pcw_reg);
	}

	udelay(20);
}

/**
 * mtk_pll_calc_values - calculate good values for a given input frequency.
 * @clk:	The clk
 * @pcw:	The pcw value (output)
 * @postdiv:	The post divider (output)
 * @freq:	The desired target frequency
 */
static void mtk_pll_calc_values(struct clk *clk, u32 *pcw, u32 *postdiv,
				u32 freq)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	unsigned long fmin = pll->fmin ? pll->fmin : 1000 * MHZ;
	u64 _pcw;
	int ibits;
	u32 val;

	if (freq > pll->fmax)
		freq = pll->fmax;

	for (val = 0; val < 5; val++) {
		*postdiv = 1 << val;
		if ((u64)freq * *postdiv >= fmin)
			break;
	}

	/* _pcw = freq * postdiv * 2^pcwfbits / xtal2_rate */
	ibits = pll->pcwibits ? pll->pcwibits : INTEGER_BITS;
	_pcw = ((u64)freq << val) << (pll->pcwbits - ibits);
	do_div(_pcw, priv->tree->xtal2_rate);

	*pcw = (u32)_pcw;
}

static ulong mtk_apmixedsys_set_rate(struct clk *clk, ulong rate)
{
	u32 pcw = 0;
	u32 postdiv;

	mtk_pll_calc_values(clk, &pcw, &postdiv, rate);
	mtk_pll_set_rate_regs(clk, pcw, postdiv);

	return 0;
}

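/*
 * Read the postdiv and pcw fields back from the PLL registers and
 * recalculate the current output rate from the xtal2 reference.
 */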
static ulong mtk_apmixedsys_get_rate(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 postdiv;
	u32 pcw;

	postdiv = (readl(priv->base + pll->pd_reg) >> pll->pd_shift) &
		  POSTDIV_MASK;
	postdiv = 1 << postdiv;

	pcw = readl(priv->base + pll->pcw_reg) >> pll->pcw_shift;
	pcw &= GENMASK(pll->pcwbits - 1, 0);

	return __mtk_pll_recalc_rate(pll, priv->tree->xtal2_rate,
				     pcw, postdiv);
}

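/*
 * PLL power-up sequence: assert the power bit, release the isolation
 * cell, then set the enable mask and wait 20us for the PLL to lock
 * before (optionally) releasing the reset bar.
 */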
static int mtk_apmixedsys_enable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 r;

	r = readl(priv->base + pll->pwr_reg) | CON0_PWR_ON;
	writel(r, priv->base + pll->pwr_reg);
	udelay(1);

	r = readl(priv->base + pll->pwr_reg) & ~CON0_ISO_EN;
	writel(r, priv->base + pll->pwr_reg);
	udelay(1);

	r = readl(priv->base + pll->reg + REG_CON0);
	r |= pll->en_mask;
	writel(r, priv->base + pll->reg + REG_CON0);

	udelay(20);

	if (pll->flags & HAVE_RST_BAR) {
		r = readl(priv->base + pll->reg + REG_CON0);
		r |= pll->rst_bar_mask;
		writel(r, priv->base + pll->reg + REG_CON0);
	}

	return 0;
}

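/* PLL power-down sequence: the reverse of mtk_apmixedsys_enable() */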
static int mtk_apmixedsys_disable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 r;

	if (pll->flags & HAVE_RST_BAR) {
		r = readl(priv->base + pll->reg + REG_CON0);
		r &= ~pll->rst_bar_mask;
		writel(r, priv->base + pll->reg + REG_CON0);
	}

	r = readl(priv->base + pll->reg + REG_CON0);
	r &= ~CON0_BASE_EN;
	writel(r, priv->base + pll->reg + REG_CON0);

	r = readl(priv->base + pll->pwr_reg) | CON0_ISO_EN;
	writel(r, priv->base + pll->pwr_reg);

	r = readl(priv->base + pll->pwr_reg) & ~CON0_PWR_ON;
	writel(r, priv->base + pll->pwr_reg);

	return 0;
}

/* topckgen functions */

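/* rate = parent_rate * mult / div for a fixed-factor clock */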
static ulong mtk_factor_recalc_rate(const struct mtk_fixed_factor *fdiv,
				    ulong parent_rate)
{
	u64 rate = parent_rate * fdiv->mult;

	do_div(rate, fdiv->div);

	return rate;
}

static ulong mtk_topckgen_get_factor_rate(struct clk *clk, u32 off)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_fixed_factor *fdiv = &priv->tree->fdivs[off];
	ulong rate;

	switch (fdiv->flags & CLK_PARENT_MASK) {
	case CLK_PARENT_APMIXED:
		rate = mtk_clk_find_parent_rate(clk, fdiv->parent,
						priv->parent);
		break;
	case CLK_PARENT_TOPCKGEN:
		rate = mtk_clk_find_parent_rate(clk, fdiv->parent, NULL);
		break;
	case CLK_PARENT_XTAL:
	default:
		rate = priv->tree->xtal_rate;
	}

	return mtk_factor_recalc_rate(fdiv, rate);
}

static ulong mtk_infrasys_get_factor_rate(struct clk *clk, u32 off)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_fixed_factor *fdiv = &priv->tree->fdivs[off];
	ulong rate;

	switch (fdiv->flags & CLK_PARENT_MASK) {
	case CLK_PARENT_TOPCKGEN:
		rate = mtk_clk_find_parent_rate(clk, fdiv->parent,
						priv->parent);
		break;
	case CLK_PARENT_XTAL:
		rate = priv->tree->xtal_rate;
		break;
	default:
		rate = mtk_clk_find_parent_rate(clk, fdiv->parent, NULL);
	}

	return mtk_factor_recalc_rate(fdiv, rate);
}

static ulong mtk_topckgen_get_mux_rate(struct clk *clk, u32 off)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_composite *mux = &priv->tree->muxes[off];
	u32 index;

	index = readl(priv->base + mux->mux_reg);
	index &= mux->mux_mask << mux->mux_shift;
	index = index >> mux->mux_shift;

	if (mux->parent[index] > 0 ||
	    (mux->parent[index] == CLK_XTAL &&
	     priv->tree->flags & CLK_BYPASS_XTAL)) {
		switch (mux->flags & CLK_PARENT_MASK) {
		case CLK_PARENT_APMIXED:
			return mtk_clk_find_parent_rate(clk, mux->parent[index],
							priv->parent);
		default:
			return mtk_clk_find_parent_rate(clk, mux->parent[index],
							NULL);
		}
	}

	return priv->tree->xtal_rate;
}

static ulong mtk_infrasys_get_mux_rate(struct clk *clk, u32 off)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_composite *mux = &priv->tree->muxes[off];
	u32 index;

	index = readl(priv->base + mux->mux_reg);
	index &= mux->mux_mask << mux->mux_shift;
	index = index >> mux->mux_shift;

	if (mux->parent[index] > 0 ||
	    (mux->parent[index] == CLK_XTAL &&
	     priv->tree->flags & CLK_BYPASS_XTAL)) {
		switch (mux->flags & CLK_PARENT_MASK) {
		case CLK_PARENT_TOPCKGEN:
			return mtk_clk_find_parent_rate(clk, mux->parent[index],
							priv->parent);
		default:
			return mtk_clk_find_parent_rate(clk, mux->parent[index],
							NULL);
		}
	}

	return 0;
}

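/*
 * Clock IDs are laid out as fixed-rate clocks first, then fixed-factor
 * dividers (from fdivs_offs) and finally composite muxes (from
 * muxes_offs); dispatch on the ID range accordingly.
 */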
static ulong mtk_topckgen_get_rate(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);

	if (clk->id < priv->tree->fdivs_offs)
		return priv->tree->fclks[clk->id].rate;
	else if (clk->id < priv->tree->muxes_offs)
		return mtk_topckgen_get_factor_rate(clk, clk->id -
						    priv->tree->fdivs_offs);
	else
		return mtk_topckgen_get_mux_rate(clk, clk->id -
						 priv->tree->muxes_offs);
}

static ulong mtk_infrasys_get_rate(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	ulong rate;

	if (clk->id < priv->tree->fdivs_offs) {
		rate = priv->tree->fclks[clk->id].rate;
	} else if (clk->id < priv->tree->muxes_offs) {
		rate = mtk_infrasys_get_factor_rate(clk, clk->id -
						    priv->tree->fdivs_offs);
	} else {
		rate = mtk_infrasys_get_mux_rate(clk, clk->id -
						 priv->tree->muxes_offs);
	}

	return rate;
}

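/*
 * Composite muxes may carry an optional clock gate; ungate it here and,
 * when requested, also enable the scpsys clock-off control.
 */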
static int mtk_clk_mux_enable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_composite *mux;
	u32 val;

	if (clk->id < priv->tree->muxes_offs)
		return 0;

	mux = &priv->tree->muxes[clk->id - priv->tree->muxes_offs];
	if (mux->gate_shift < 0)
		return 0;

	/* enable clock gate */
	if (mux->flags & CLK_MUX_SETCLR_UPD) {
		val = BIT(mux->gate_shift);
		writel(val, priv->base + mux->mux_clr_reg);
	} else {
		val = readl(priv->base + mux->gate_reg);
		val &= ~BIT(mux->gate_shift);
		writel(val, priv->base + mux->gate_reg);
	}

	if (mux->flags & CLK_DOMAIN_SCPSYS) {
		/* enable scpsys clock off control */
		writel(SCP_ARMCK_OFF_EN, priv->base + CLK_SCP_CFG0);
		writel(SCP_AXICK_DCM_DIS_EN | SCP_AXICK_26M_SEL_EN,
		       priv->base + CLK_SCP_CFG1);
	}

	return 0;
}

static int mtk_clk_mux_disable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_composite *mux;
	u32 val;

	if (clk->id < priv->tree->muxes_offs)
		return 0;

	mux = &priv->tree->muxes[clk->id - priv->tree->muxes_offs];
	if (mux->gate_shift < 0)
		return 0;

	/* disable clock gate */
	if (mux->flags & CLK_MUX_SETCLR_UPD) {
		val = BIT(mux->gate_shift);
		writel(val, priv->base + mux->mux_set_reg);
	} else {
		val = readl(priv->base + mux->gate_reg);
		val |= BIT(mux->gate_shift);
		writel(val, priv->base + mux->gate_reg);
	}

	return 0;
}

static int mtk_common_clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);

	if (clk->id < priv->tree->muxes_offs)
		return 0;

	return mtk_clk_mux_set_parent(priv->base, parent->id,
			&priv->tree->muxes[clk->id - priv->tree->muxes_offs]);
}

/* CG functions */

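/*
 * Clock gates come in four register schemes: dedicated set/clr registers
 * (normal or inverted polarity) and a single status register that is
 * read-modify-written (normal or inverted polarity).
 */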
static int mtk_clk_gate_enable(struct clk *clk)
{
	struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_gate *gate = &priv->gates[clk->id];
	u32 bit = BIT(gate->shift);

	switch (gate->flags & CLK_GATE_MASK) {
	case CLK_GATE_SETCLR:
		writel(bit, priv->base + gate->regs->clr_ofs);
		break;
	case CLK_GATE_SETCLR_INV:
		writel(bit, priv->base + gate->regs->set_ofs);
		break;
	case CLK_GATE_NO_SETCLR:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, 0);
		break;
	case CLK_GATE_NO_SETCLR_INV:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, bit);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtk_clk_gate_disable(struct clk *clk)
{
	struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_gate *gate = &priv->gates[clk->id];
	u32 bit = BIT(gate->shift);

	switch (gate->flags & CLK_GATE_MASK) {
	case CLK_GATE_SETCLR:
		writel(bit, priv->base + gate->regs->set_ofs);
		break;
	case CLK_GATE_SETCLR_INV:
		writel(bit, priv->base + gate->regs->clr_ofs);
		break;
	case CLK_GATE_NO_SETCLR:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, bit);
		break;
	case CLK_GATE_NO_SETCLR_INV:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static ulong mtk_clk_gate_get_rate(struct clk *clk)
{
	struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_gate *gate = &priv->gates[clk->id];

	return mtk_clk_find_parent_rate(clk, gate->parent, priv->parent);
}

const struct clk_ops mtk_clk_apmixedsys_ops = {
	.enable = mtk_apmixedsys_enable,
	.disable = mtk_apmixedsys_disable,
	.set_rate = mtk_apmixedsys_set_rate,
	.get_rate = mtk_apmixedsys_get_rate,
};

const struct clk_ops mtk_clk_topckgen_ops = {
	.enable = mtk_clk_mux_enable,
	.disable = mtk_clk_mux_disable,
	.get_rate = mtk_topckgen_get_rate,
	.set_parent = mtk_common_clk_set_parent,
};

const struct clk_ops mtk_clk_infrasys_ops = {
	.enable = mtk_clk_mux_enable,
	.disable = mtk_clk_mux_disable,
	.get_rate = mtk_infrasys_get_rate,
	.set_parent = mtk_common_clk_set_parent,
};

const struct clk_ops mtk_clk_gate_ops = {
	.enable = mtk_clk_gate_enable,
	.disable = mtk_clk_gate_disable,
	.get_rate = mtk_clk_gate_get_rate,
};

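/*
 * Resolve the parent clock controller: prefer an explicit "clock-parent"
 * phandle and fall back to the apmixedsys driver instance when none is
 * given.
 */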
int mtk_common_clk_init(struct udevice *dev,
			const struct mtk_clk_tree *tree)
{
	struct mtk_clk_priv *priv = dev_get_priv(dev);
	struct udevice *parent;
	int ret;

	priv->base = dev_read_addr_ptr(dev);
	if (!priv->base)
		return -ENOENT;

	ret = uclass_get_device_by_phandle(UCLASS_CLK, dev, "clock-parent", &parent);
	if (ret || !parent) {
		ret = uclass_get_device_by_driver(UCLASS_CLK,
				DM_DRIVER_GET(mtk_clk_apmixedsys), &parent);
		if (ret || !parent)
			return -ENOENT;
	}

	priv->parent = parent;
	priv->tree = tree;

	return 0;
}

int mtk_common_clk_gate_init(struct udevice *dev,
			     const struct mtk_clk_tree *tree,
			     const struct mtk_gate *gates)
{
	struct mtk_cg_priv *priv = dev_get_priv(dev);
	struct udevice *parent;
	int ret;

	priv->base = dev_read_addr_ptr(dev);
	if (!priv->base)
		return -ENOENT;

	ret = uclass_get_device_by_phandle(UCLASS_CLK, dev, "clock-parent", &parent);
	if (ret || !parent) {
		ret = uclass_get_device_by_driver(UCLASS_CLK,
				DM_DRIVER_GET(mtk_clk_topckgen), &parent);
		if (ret || !parent)
			return -ENOENT;
	}

	priv->parent = parent;
	priv->tree = tree;
	priv->gates = gates;

	return 0;
}