/*
 *
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Rajendra Nayak <rnayak@ti.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <i2c.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
        12000000,              /* 12 MHz */
        20000000,              /* 20 MHz */
        16800000,              /* 16.8 MHz */
        19200000,              /* 19.2 MHz */
        26000000,              /* 26 MHz */
        27000000,              /* 27 MHz */
        38400000,              /* 38.4 MHz */
};
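
/*
 * Note: the index into sys_clk_array is CM_SYS_CLKSEL - 1 (see
 * __get_sys_clk_index() below); e.g. a CM_SYS_CLKSEL field value of 4
 * selects entry 3, i.e. 19.2 MHz.
 */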

static inline u32 __get_sys_clk_index(void)
{
        s8 ind;
        /*
         * For ES1 the ROM code calibration of the sys clock is not reliable
         * due to a hw issue, so use a hard-coded value. If this value is not
         * correct for some board, override this function in the board file.
         * From ES2.0 onwards this information is available from
         * CM_SYS_CLKSEL.
         */
        if (omap_revision() == OMAP4430_ES1_0)
                ind = OMAP_SYS_CLK_IND_38_4_MHZ;
        else {
                /* SYS_CLKSEL - 1 to match the dpll param array indices */
                ind = (readl((*prcm)->cm_sys_clksel) &
                        CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
        }
        return ind;
}

u32 get_sys_clk_index(void)
        __attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
        u8 index = get_sys_clk_index();
        return sys_clk_array[index];
}

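/*
 * Descriptive note: post-divider fields set to a negative value in the
 * dpll_params table are simply skipped below, so the corresponding divider
 * registers are left untouched.
 */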
void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        /* Setup post-dividers */
        if (params->m2 >= 0)
                writel(params->m2, &dpll_regs->cm_div_m2_dpll);
        if (params->m3 >= 0)
                writel(params->m3, &dpll_regs->cm_div_m3_dpll);
        if (params->m4_h11 >= 0)
                writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
        if (params->m5_h12 >= 0)
                writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
        if (params->m6_h13 >= 0)
                writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
        if (params->m7_h14 >= 0)
                writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
        if (params->h21 >= 0)
                writel(params->h21, &dpll_regs->cm_div_h21_dpll);
        if (params->h22 >= 0)
                writel(params->h22, &dpll_regs->cm_div_h22_dpll);
        if (params->h23 >= 0)
                writel(params->h23, &dpll_regs->cm_div_h23_dpll);
        if (params->h24 >= 0)
                writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

static inline void do_bypass_dpll(u32 const base)
{
        struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

        clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
                        CM_CLKMODE_DPLL_DPLL_EN_MASK,
                        DPLL_EN_FAST_RELOCK_BYPASS <<
                        CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
                           LDELAY)) {
                printf("Bypassing DPLL failed %x\n", base);
        }
}

static inline void do_lock_dpll(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
                        CM_CLKMODE_DPLL_DPLL_EN_MASK,
                        DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
                           &dpll_regs->cm_idlest_dpll, LDELAY)) {
                printf("DPLL locking failed for %x\n", base);
                hang();
        }
}

inline u32 check_for_lock(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
        u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

        return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->abe[sysclk_ind];
#else
        return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
                        (struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();

        if (!dpll_data->ddr)
                return NULL;
        return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
                        (struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();

        if (!dpll_data->gmac)
                return NULL;
        return &dpll_data->gmac[sysclk_ind];
}
#endif

static void do_setup_dpll(u32 const base, const struct dpll_params *params,
                          u8 lock, char *dpll)
{
        u32 temp, M, N;
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!params)
                return;

        temp = readl(&dpll_regs->cm_clksel_dpll);

        if (check_for_lock(base)) {
                /*
                 * The DPLL has already been locked by ROM code using CH.
                 * Check whether M and N match the ideal nominal OPP values.
                 * If they match, skip the relock; otherwise relock.
                 */
                M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
                N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
                if ((M != (params->m)) || (N != (params->n))) {
                        debug("\n %s DPLL locked, but not for ideal M = %d, "
                              "N = %d values, current values are M = %d, "
                              "N = %d", dpll, params->m, params->n,
                              M, N);
                } else {
                        /* DPLL locked with ideal values for nominal OPPs. */
                        debug("\n %s DPLL already locked with ideal "
                              "nominal OPP values", dpll);

                        bypass_dpll(base);
                        goto setup_post_dividers;
                }
        }

        bypass_dpll(base);

        /* Set M & N */
        temp &= ~CM_CLKSEL_DPLL_M_MASK;
        temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

        temp &= ~CM_CLKSEL_DPLL_N_MASK;
        temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

        writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
        setup_post_dividers(base, params);

        /* Lock */
        if (lock)
                do_lock_dpll(base);

        /* Wait till the DPLL locks */
        if (lock)
                wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
        u32 ddr_clk, sys_clk_khz, omap_rev, divider;
        const struct dpll_params *core_dpll_params;

        omap_rev = omap_revision();
        sys_clk_khz = get_sys_clk_freq() / 1000;

        core_dpll_params = get_core_dpll_params(*dplls_data);

        debug("sys_clk %d\n ", sys_clk_khz * 1000);

        /* Find Core DPLL locked frequency first */
        ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
                  (core_dpll_params->n + 1);

        if (omap_rev < OMAP5430_ES1_0) {
                /*
                 * DDR frequency is PHY_ROOT_CLK/2
                 * PHY_ROOT_CLK = Fdpll/2/M2
                 */
                divider = 4;
        } else {
                /*
                 * DDR frequency is PHY_ROOT_CLK
                 * PHY_ROOT_CLK = Fdpll/2/M2
                 */
                divider = 2;
        }

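        /*
         * Worked example (hypothetical table values, for illustration only):
         * with a 19.2 MHz sys clock and core DPLL params m = 125, n = 2,
         * m2 = 1 on OMAP4 (divider = 4), ddr_clk above is
         * 19200 * 2 * 125 / 3 = 1600000 kHz and the final DDR clock is
         * 1600000 / 4 / 1 = 400000 kHz, i.e. 400 MHz.
         */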
        ddr_clk = ddr_clk / divider / core_dpll_params->m2;
        ddr_clk *= 1000;        /* convert to Hz */
        debug("ddr_clk %d\n ", ddr_clk);

        return ddr_clk;
}

/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0   : 600 MHz
 * 4430 ES2.x   : 792 MHz (OPP Turbo)
 * 4460         : 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
        const struct dpll_params *params;
        struct dpll_regs *mpu_dpll_regs;
        u32 omap_rev;
        omap_rev = omap_revision();

        /*
         * DCC and clock divider settings for 4460.
         * DCC is required if more than a certain frequency is needed:
         * for 4460, above 1 GHz; for 5430, above 1.4 GHz.
         */
        if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
                mpu_dpll_regs =
                        (struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
                bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
                clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
                             MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
                setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
                             MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
                clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
                             CM_CLKSEL_DCC_EN_MASK);
        }

        params = get_mpu_dpll_params(*dplls_data);

        do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
        debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
        defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
        const struct dpll_params *params;
        u32 sys_clk_khz, sd_div, num, den;

        sys_clk_khz = get_sys_clk_freq() / 1000;
        /*
         * USB:
         * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
         * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
         *      - where CLKINP is sys_clk in MHz
         * Use CLKINP in KHz and adjust the denominator accordingly so
         * that we have enough accuracy and at the same time no overflow
         */
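        /*
         * Worked example (hypothetical M/N, for illustration only): with a
         * 19.2 MHz sys clock, M = 400 and N = 7, the rounding-up division
         * below gives sd_div = CEILING(400 / 8 * 19.2 / 250) =
         * CEILING(3.84) = 4.
         */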
        params = get_usb_dpll_params(*dplls_data);
        num = params->m * sys_clk_khz;
        den = (params->n + 1) * 250 * 1000;
        num += den - 1;
        sd_div = num / den;
        clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
                        CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
                        sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

        /* Now setup the dpll with the regular function */
        do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
        u32 temp;
        const struct dpll_params *params;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

        debug("setup_dplls\n");

        /* CORE dpll */
        params = get_core_dpll_params(*dplls_data);     /* default - safest */
        /*
         * Do not lock the core DPLL now. Just set it up.
         * Core DPLL will be locked after setting up EMIF
         * using the FREQ_UPDATE method (freq_update_core()).
         */
        if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
            EMIF_SDRAM_TYPE_LPDDR2)
                do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
                              DPLL_NO_LOCK, "core");
        else
                do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
                              DPLL_LOCK, "core");
        /* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
        temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
               (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
               (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
        writel(temp, (*prcm)->cm_clksel_core);
        debug("Core DPLL configured\n");

        /* lock PER dpll */
        params = get_per_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
                      params, DPLL_LOCK, "per");
        debug("PER DPLL locked\n");

        /* MPU dpll */
        configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
        defined(CONFIG_USB_MUSB_OMAP2PLUS)
        setup_usb_dpll();
#endif
        params = get_ddr_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
                      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
        params = get_gmac_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
                      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
        u32 offset_code;

        volt_offset -= pmic->base_offset;

        offset_code = (volt_offset + pmic->step - 1) / pmic->step;

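        /*
         * Worked example (hypothetical PMIC data, for illustration only):
         * with base_offset = 500000 uV, step = 10000 uV and start_code = 6,
         * a request of 1000000 uV gives
         * offset_code = (500000 + 9999) / 10000 = 50, returned as 50 + 6 = 56.
         */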
        /*
         * Offset codes 1-6 all give the base voltage in Palmas
         * Offset code 0 switches OFF the SMPS
         */
        return offset_code + pmic->start_code;
}

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
        u32 offset_code;
        u32 offset = volt_mv;
        int ret = 0;

        if (!volt_mv)
                return;

        pmic->pmic_bus_init();
        /* See if we can first get the GPIO if needed */
        if (pmic->gpio_en)
                ret = gpio_request(pmic->gpio, "PMIC_GPIO");

        if (ret < 0) {
                printf("%s: gpio %d request failed %d\n", __func__,
                       pmic->gpio, ret);
                return;
        }

        /* Pull the GPIO low to select SET0 register, while we program SET1 */
        if (pmic->gpio_en)
                gpio_direction_output(pmic->gpio, 0);

        /* convert to uV for better accuracy in the calculations */
        offset *= 1000;

        offset_code = get_offset_code(offset, pmic);

        debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
              offset_code);

        if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
                printf("Scaling voltage failed for 0x%x\n", vcore_reg);
        if (pmic->gpio_en)
                gpio_direction_output(pmic->gpio, 1);
}

int __weak get_voltrail_opp(int rail_offset)
{
        /*
         * By default return OPP_NOM for all voltage rails.
         */
        return OPP_NOM;
}

static u32 optimize_vcore_voltage(struct volts const *v, int opp)
{
        u32 val;

        if (!v->value[opp])
                return 0;
        if (!v->efuse.reg[opp])
                return v->value[opp];

        switch (v->efuse.reg_bits) {
        case 16:
                val = readw(v->efuse.reg[opp]);
                break;
        case 32:
                val = readl(v->efuse.reg[opp]);
                break;
        default:
                printf("Error: efuse 0x%08x bits=%d unknown\n",
                       v->efuse.reg[opp], v->efuse.reg_bits);
                return v->value[opp];
        }

        if (!val) {
                printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
                       v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
                return v->value[opp];
        }

        debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
              __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
              val);
        return val;
}

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
        int i, opp, j, ol;
        struct volts *pv = (struct volts *)vcores;
        struct volts *px;

        for (i = 0; i < (sizeof(struct vcores_data)/sizeof(struct volts)); i++) {
                opp = get_voltrail_opp(i);
                debug("%d -> ", pv->value[opp]);

                if (pv->value[opp]) {
                        /* Handle non-empty members only */
                        pv->value[opp] = optimize_vcore_voltage(pv, opp);
                        px = (struct volts *)vcores;
                        j = 0;
                        while (px < pv) {
                                /*
                                 * Scan already handled non-empty members to see
                                 * if we have a group and find the max voltage,
                                 * which is set to the first occurrence of the
                                 * particular SMPS; the other group voltages are
                                 * zeroed.
                                 */
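                                /*
                                 * For example (hypothetical grouping): if two
                                 * rails map to the same PMIC and the same SMPS
                                 * register, the earlier member keeps the higher
                                 * of the two requested voltages and the later
                                 * one is zeroed, so only one write goes to the
                                 * PMIC for that SMPS.
                                 */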
                                ol = get_voltrail_opp(j);
                                if (px->value[ol] &&
                                    (pv->pmic->i2c_slave_addr ==
                                     px->pmic->i2c_slave_addr) &&
                                    (pv->addr == px->addr)) {
                                        /* Same PMIC, same SMPS */
                                        if (pv->value[opp] > px->value[ol])
                                                px->value[ol] = pv->value[opp];

                                        pv->value[opp] = 0;
                                }
                                px++;
                                j++;
                        }
                }
                debug("%d\n", pv->value[opp]);
                pv++;
        }

        opp = get_voltrail_opp(VOLT_CORE);
        debug("cor: %d\n", vcores->core.value[opp]);
        do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
                       vcores->core.pmic);
        /*
         * IO delay recalibration should be done immediately after
         * adjusting AVS voltages for VDD_CORE_L.
         * Respective boards should call __recalibrate_iodelay()
         * with proper mux, virtual and manual mode configurations.
         */
#ifdef CONFIG_IODELAY_RECALIBRATION
        recalibrate_iodelay();
#endif

        opp = get_voltrail_opp(VOLT_MPU);
        debug("mpu: %d\n", vcores->mpu.value[opp]);
        do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
                       vcores->mpu.pmic);
        /* Configure MPU ABB LDO after scale */
        abb_setup(vcores->mpu.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
                  (*prcm)->prm_abbldo_mpu_setup,
                  (*prcm)->prm_abbldo_mpu_ctrl,
                  (*prcm)->prm_irqstatus_mpu_2,
                  vcores->mpu.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_MM);
        debug("mm: %d\n", vcores->mm.value[opp]);
        do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
                       vcores->mm.pmic);
        /* Configure MM ABB LDO after scale */
        abb_setup(vcores->mm.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
                  (*prcm)->prm_abbldo_mm_setup,
                  (*prcm)->prm_abbldo_mm_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->mm.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_GPU);
        debug("gpu: %d\n", vcores->gpu.value[opp]);
        do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
                       vcores->gpu.pmic);
        /* Configure GPU ABB LDO after scale */
        abb_setup(vcores->gpu.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
                  (*prcm)->prm_abbldo_gpu_setup,
                  (*prcm)->prm_abbldo_gpu_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->gpu.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_EVE);
        debug("eve: %d\n", vcores->eve.value[opp]);
        do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
                       vcores->eve.pmic);
        /* Configure EVE ABB LDO after scale */
        abb_setup(vcores->eve.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
                  (*prcm)->prm_abbldo_eve_setup,
                  (*prcm)->prm_abbldo_eve_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->eve.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_IVA);
        debug("iva: %d\n", vcores->iva.value[opp]);
        do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
                       vcores->iva.pmic);
        /* Configure IVA ABB LDO after scale */
        abb_setup(vcores->iva.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
                  (*prcm)->prm_abbldo_iva_setup,
                  (*prcm)->prm_abbldo_iva_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->iva.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
        clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
                        enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
        debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
        clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);
        debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
        u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
        u32 bound = LDELAY;

        while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
               (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {

                clkctrl = readl(clkctrl_addr);
                idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
                         MODULE_CLKCTRL_IDLEST_SHIFT;
                if (--bound == 0) {
                        printf("Clock enable failed for 0x%x idlest 0x%x\n",
                               clkctrl_addr, clkctrl);
                        return;
                }
        }
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
                                       u32 wait_for_enable)
{
        clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
                        enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
        debug("Enable clock module - %x\n", clkctrl_addr);
        if (wait_for_enable)
                wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
        u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
        u32 bound = LDELAY;

        while ((idlest != MODULE_CLKCTRL_IDLEST_DISABLED)) {
                clkctrl = readl(clkctrl_addr);
                idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
                         MODULE_CLKCTRL_IDLEST_SHIFT;
                if (--bound == 0) {
                        printf("Clock disable failed for 0x%x idlest 0x%x\n",
                               clkctrl_addr, clkctrl);
                        return;
                }
        }
}

static inline void disable_clock_module(u32 const clkctrl_addr,
                                        u32 wait_for_disable)
{
        clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);
        debug("Disable clock module - %x\n", clkctrl_addr);
        if (wait_for_disable)
                wait_for_clk_disable(clkctrl_addr);
}

void freq_update_core(void)
{
        u32 freq_config1 = 0;
        const struct dpll_params *core_dpll_params;
        u32 omap_rev = omap_revision();

        core_dpll_params = get_core_dpll_params(*dplls_data);
        /* Put EMIF clock domain in sw wakeup mode */
        enable_clock_domain((*prcm)->cm_memif_clkstctrl,
                            CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
        wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
        wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

        freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
            SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

        freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
            SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

        freq_config1 |= (core_dpll_params->m2 <<
            SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
            SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

        writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
        if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
                           (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
                puts("FREQ UPDATE procedure failed!!");
                hang();
        }

        /*
         * Putting EMIF in HW_AUTO is seen to cause issues with
         * EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
         * on OMAP5430 ES1.0 silicon.
         */
        if (omap_rev != OMAP5430_ES1_0) {
                /* Put EMIF clock domain back in hw auto mode */
                enable_clock_domain((*prcm)->cm_memif_clkstctrl,
                                    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
                wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
                wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
        }
}

void bypass_dpll(u32 const base)
{
        do_bypass_dpll(base);
        wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
        do_lock_dpll(base);
        wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
        /* Do not add any spl_debug prints in this function */
        clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);

        /* Enable all UARTs - console will be on one of them */
        clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

void do_enable_clocks(u32 const *clk_domains,
                      u32 const *clk_modules_hw_auto,
                      u32 const *clk_modules_explicit_en,
                      u8 wait_for_enable)
{
        u32 i, max = 100;

        /* Put the clock domains in SW_WKUP mode */
        for (i = 0; (i < max) && clk_domains[i]; i++) {
                enable_clock_domain(clk_domains[i],
                                    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
        }

        /* Clock modules that need to be put in HW_AUTO */
        for (i = 0; (i < max) && clk_modules_hw_auto[i]; i++) {
                enable_clock_module(clk_modules_hw_auto[i],
                                    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
                                    wait_for_enable);
        };

        /* Clock modules that need to be put in SW_EXPLICIT_EN mode */
        for (i = 0; (i < max) && clk_modules_explicit_en[i]; i++) {
                enable_clock_module(clk_modules_explicit_en[i],
                                    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
                                    wait_for_enable);
        };

        /* Put the clock domains in HW_AUTO mode now */
        for (i = 0; (i < max) && clk_domains[i]; i++) {
                enable_clock_domain(clk_domains[i],
                                    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
        }
}

void do_disable_clocks(u32 const *clk_domains,
                       u32 const *clk_modules_disable,
                       u8 wait_for_disable)
{
        u32 i, max = 100;

        /* Clock modules that need to be put in SW_DISABLE */
        for (i = 0; (i < max) && clk_modules_disable[i]; i++)
                disable_clock_module(clk_modules_disable[i],
                                     wait_for_disable);

        /* Put the clock domains in SW_SLEEP mode */
        for (i = 0; (i < max) && clk_domains[i]; i++)
                disable_clock_domain(clk_domains[i]);
}

/**
 * setup_early_clocks() - Setup early clocks needed for SoC
 *
 * Setup clocks for console, SPL basic initialization clocks and initialize
 * the timer. This is invoked prior to prcm_init.
 */
void setup_early_clocks(void)
{
        switch (omap_hw_init_context()) {
        case OMAP_INIT_CONTEXT_SPL:
        case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
        case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
                setup_clocks_for_console();
                enable_basic_clocks();
                timer_init();
                /* Fall through */
        }
}

void prcm_init(void)
{
        switch (omap_hw_init_context()) {
        case OMAP_INIT_CONTEXT_SPL:
        case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
        case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
                scale_vcores(*omap_vcores);
                setup_dplls();
                setup_warmreset_time();
                break;
        default:
                break;
        }

        if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
                enable_basic_uboot_clocks();
}

void gpi2c_init(void)
{
        static int gpi2c = 1;

        if (gpi2c) {
                i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
                         CONFIG_SYS_OMAP24_I2C_SLAVE);
                gpi2c = 0;
        }
}