Tom Rini | 83d290c | 2018-05-06 17:58:06 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
Tom Warren | 32edd2e | 2014-01-24 12:46:14 -0700 | [diff] [blame] | 2 | /* |
| 3 | * (C) Copyright 2013 |
| 4 | * NVIDIA Corporation <www.nvidia.com> |
Tom Warren | 32edd2e | 2014-01-24 12:46:14 -0700 | [diff] [blame] | 5 | */ |
| 6 | |
| 7 | #include <common.h> |
Simon Glass | f7ae49f | 2020-05-10 11:40:05 -0600 | [diff] [blame] | 8 | #include <log.h> |
Tom Warren | 32edd2e | 2014-01-24 12:46:14 -0700 | [diff] [blame] | 9 | #include <asm/io.h> |
| 10 | #include <asm/arch/ahb.h> |
| 11 | #include <asm/arch/clock.h> |
| 12 | #include <asm/arch/flow.h> |
| 13 | #include <asm/arch/pinmux.h> |
| 14 | #include <asm/arch/tegra.h> |
| 15 | #include <asm/arch-tegra/clk_rst.h> |
| 16 | #include <asm/arch-tegra/pmc.h> |
| 17 | #include <asm/arch-tegra/ap.h> |
Masahiro Yamada | 09f455d | 2015-02-20 17:04:04 +0900 | [diff] [blame] | 18 | #include "../cpu.h" |
Tom Warren | 32edd2e | 2014-01-24 12:46:14 -0700 | [diff] [blame] | 19 | |
| 20 | /* Tegra124-specific CPU init code */ |
| 21 | |
/*
 * Power up the CPU VDD rail via the PMIC and arm the PMC power request.
 *
 * Un-tristates the PWR_I2C pins so the PMIC is reachable, calls the
 * board-specific hook to raise VDD_CPU, programs the CPUPWRGOOD timer,
 * then enables the CPUPWRREQ output.  The statement order is part of
 * the power sequencing and must not be changed.
 */
static void enable_cpu_power_rail(void)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry\n", __func__);

	/* un-tristate PWR_I2C SCL/SDA, rest of the defaults are correct */
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SCL_PZ6);
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SDA_PZ7);

	/* Board/PMIC hook: raise VDD_CPU over the PWR_I2C bus */
	pmic_enable_cpu_vdd();

	/*
	 * Set CPUPWRGOOD_TIMER - APB clock is 1/2 of SCLK (102MHz),
	 * set it for 5ms as per SysEng (102MHz*5ms = 510000 (7C830h).
	 */
	writel(0x7C830, &pmc->pmc_cpupwrgood_timer);

	/* Set polarity to 0 (normal) and enable CPUPWRREQ_OE */
	clrbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_POL);
	setbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_OE);
}
| 44 | |
/*
 * Switch the CPU complex onto PLL-X and ungate its clocks.
 *
 * Waits for PLL-X lock, programs the CCLK burst policy and super-clock
 * divider, releases the per-CPU clock stops, and enables the CPU
 * complex peripheral clocks.
 *
 * NOTE(review): the lock poll below has no timeout — boot hangs here
 * if PLL-X never locks (init_pllx() must have run first).
 */
static void enable_cpu_clocks(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	struct clk_pll_info *pllinfo = &tegra_pll_info_table[CLOCK_ID_XCPU];
	u32 reg;

	debug("%s entry\n", __func__);

	/* Wait for PLL-X to lock (lock bit position comes from pllinfo) */
	do {
		reg = readl(&clkrst->crc_pll_simple[SIMPLE_PLLX].pll_base);
		debug("%s: PLLX base = 0x%08X\n", __func__, reg);
	} while ((reg & (1 << pllinfo->lock_det)) == 0);

	debug("%s: PLLX locked, delay for stable clocks\n", __func__);
	/* Wait until all clocks are stable */
	udelay(PLL_STABILIZATION_DELAY);

	debug("%s: Setting CCLK_BURST and DIVIDER\n", __func__);
	writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
	writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);

	debug("%s: Enabling clock to all CPUs\n", __func__);
	/* Enable the clock to all CPUs (write-to-clear the CLK_STP bits) */
	reg = CLR_CPU3_CLK_STP | CLR_CPU2_CLK_STP | CLR_CPU1_CLK_STP |
		CLR_CPU0_CLK_STP;
	writel(reg, &clkrst->crc_clk_cpu_cmplx_clr);

	debug("%s: Enabling main CPU complex clocks\n", __func__);
	/* Always enable the main CPU complex clocks */
	clock_enable(PERIPH_ID_CPU);
	clock_enable(PERIPH_ID_CPULP);
	clock_enable(PERIPH_ID_CPUG);

	debug("%s: Done\n", __func__);
}
| 81 | |
/*
 * Release the resets holding the CPU complex.
 *
 * First the non-CPU (cluster logic) partitions of both clusters, then
 * the SW-controlled per-core, debug, core, CX, L2 and PRESETDBG resets
 * of the slow (LP, one core) and fast (G, four cores) clusters.  All
 * registers written here are write-to-clear "reset clear" registers.
 */
static void remove_cpu_resets(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 reg;

	debug("%s entry\n", __func__);

	/* Take the slow and fast partitions out of reset */
	reg = CLR_NONCPURESET;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);

	/* Clear the SW-controlled reset of the slow cluster (single core) */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
		CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);

	/* Clear the SW-controlled reset of the fast cluster (cores 0-3) */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
		CLR_CPURESET1 | CLR_DBGRESET1 | CLR_CORERESET1 | CLR_CXRESET1 |
		CLR_CPURESET2 | CLR_DBGRESET2 | CLR_CORERESET2 | CLR_CXRESET2 |
		CLR_CPURESET3 | CLR_DBGRESET3 | CLR_CORERESET3 | CLR_CXRESET3 |
		CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);
}
| 107 | |
Bibek Basu | 3cc7942 | 2018-06-22 13:02:28 -0600 | [diff] [blame] | 108 | static void tegra124_ram_repair(void) |
| 109 | { |
| 110 | struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE; |
| 111 | u32 ram_repair_timeout; /*usec*/ |
| 112 | u32 val; |
| 113 | |
| 114 | /* |
| 115 | * Request the Flow Controller perform RAM repair whenever it turns on |
| 116 | * a power rail that requires RAM repair. |
| 117 | */ |
| 118 | clrbits_le32(&flow->ram_repair, RAM_REPAIR_BYPASS_EN); |
| 119 | |
| 120 | /* Request SW trigerred RAM repair by setting req bit */ |
| 121 | /* cluster 0 */ |
| 122 | setbits_le32(&flow->ram_repair, RAM_REPAIR_REQ); |
| 123 | /* Wait for completion (status == 0) */ |
| 124 | ram_repair_timeout = 500; |
| 125 | do { |
| 126 | udelay(1); |
| 127 | val = readl(&flow->ram_repair); |
| 128 | } while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--); |
| 129 | if (!ram_repair_timeout) |
| 130 | debug("Ram Repair cluster0 failed\n"); |
| 131 | |
| 132 | /* cluster 1 */ |
| 133 | setbits_le32(&flow->ram_repair_cluster1, RAM_REPAIR_REQ); |
| 134 | /* Wait for completion (status == 0) */ |
| 135 | ram_repair_timeout = 500; |
| 136 | do { |
| 137 | udelay(1); |
| 138 | val = readl(&flow->ram_repair_cluster1); |
| 139 | } while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--); |
| 140 | |
| 141 | if (!ram_repair_timeout) |
| 142 | debug("Ram Repair cluster1 failed\n"); |
| 143 | } |
| 144 | |
/**
 * Tegra124 requires some special clock initialization, including setting up
 * the DVC I2C, turning on MSELECT and selecting the G CPU cluster.
 *
 * Sequence: select the G cluster, set oscillator drive strength (and
 * mirror it into the PMC for warmboot), start PLL-X, then enable and
 * un-reset the peripherals the boot path needs.  Ordering is
 * significant throughout.
 */
void tegra124_init_clocks(void)
{
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	struct clk_rst_ctlr *clkrst =
			(struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 val;

	debug("%s entry\n", __func__);

	/* Set active CPU cluster to G (clear bit 0 of CLUSTER_CONTROL) */
	clrbits_le32(&flow->cluster_control, 1);

	/* Change the oscillator drive strength */
	val = readl(&clkrst->crc_osc_ctrl);
	val &= ~OSC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << OSC_XOFS_SHIFT);
	writel(val, &clkrst->crc_osc_ctrl);

	/* Update same value in PMC_OSC_EDPD_OVER XOFS field for warmboot */
	val = readl(&pmc->pmc_osc_edpd_over);
	val &= ~PMC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << PMC_XOFS_SHIFT);
	writel(val, &pmc->pmc_osc_edpd_over);

	/* Set HOLD_CKE_LOW_EN to 1 */
	setbits_le32(&pmc->pmc_cntrl2, HOLD_CKE_LOW_EN);

	debug("Setting up PLLX\n");
	init_pllx();

	/* Program the AHB rate field of CLK_SYS_RATE (divider value 1) */
	val = (1 << CLK_SYS_RATE_AHB_RATE_SHIFT);
	writel(val, &clkrst->crc_clk_sys_rate);

	/* Enable clocks to required peripherals. TBD - minimize this list */
	debug("Enabling clocks\n");

	clock_set_enable(PERIPH_ID_CACHE2, 1);
	clock_set_enable(PERIPH_ID_GPIO, 1);
	clock_set_enable(PERIPH_ID_TMR, 1);
	clock_set_enable(PERIPH_ID_CPU, 1);
	clock_set_enable(PERIPH_ID_EMC, 1);
	clock_set_enable(PERIPH_ID_I2C5, 1);
	clock_set_enable(PERIPH_ID_APBDMA, 1);
	clock_set_enable(PERIPH_ID_MEM, 1);
	clock_set_enable(PERIPH_ID_CORESIGHT, 1);
	clock_set_enable(PERIPH_ID_MSELECT, 1);
	clock_set_enable(PERIPH_ID_DVFS, 1);

	/*
	 * Set MSELECT clock source as PLLP (00), and ask for a clock
	 * divider that would set the MSELECT clock at 102MHz for a
	 * PLLP base of 408MHz.
	 */
	clock_ll_set_source_divisor(PERIPH_ID_MSELECT, 0,
		CLK_DIVIDER(NVBL_PLLP_KHZ, 102000));

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* I2C5 (DVC) gets CLK_M (source 3) and a divisor of 17 (reg 16) */
	clock_ll_set_source_divisor(PERIPH_ID_I2C5, 3, 16);

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* Take required peripherals out of reset */
	debug("Taking periphs out of reset\n");
	reset_set_enable(PERIPH_ID_CACHE2, 0);
	reset_set_enable(PERIPH_ID_GPIO, 0);
	reset_set_enable(PERIPH_ID_TMR, 0);
	reset_set_enable(PERIPH_ID_COP, 0);
	reset_set_enable(PERIPH_ID_EMC, 0);
	reset_set_enable(PERIPH_ID_I2C5, 0);
	reset_set_enable(PERIPH_ID_APBDMA, 0);
	reset_set_enable(PERIPH_ID_MEM, 0);
	reset_set_enable(PERIPH_ID_CORESIGHT, 0);
	reset_set_enable(PERIPH_ID_MSELECT, 0);
	reset_set_enable(PERIPH_ID_DVFS, 0);

	debug("%s exit\n", __func__);
}
| 231 | |
| 232 | static bool is_partition_powered(u32 partid) |
| 233 | { |
| 234 | struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE; |
| 235 | u32 reg; |
| 236 | |
| 237 | /* Get power gate status */ |
| 238 | reg = readl(&pmc->pmc_pwrgate_status); |
| 239 | return !!(reg & (1 << partid)); |
| 240 | } |
| 241 | |
Dominik Sliwa | 5a20adf | 2019-08-01 11:06:39 +0300 | [diff] [blame] | 242 | static void unpower_partition(u32 partid) |
| 243 | { |
| 244 | struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE; |
| 245 | |
| 246 | debug("%s: part ID = %08X\n", __func__, partid); |
| 247 | /* Is the partition on? */ |
| 248 | if (is_partition_powered(partid)) { |
| 249 | /* Yes, toggle the partition power state (ON -> OFF) */ |
| 250 | debug("power_partition, toggling state\n"); |
| 251 | writel(START_CP | partid, &pmc->pmc_pwrgate_toggle); |
| 252 | |
| 253 | /* Wait for the power to come down */ |
| 254 | while (is_partition_powered(partid)) |
| 255 | ; |
| 256 | |
| 257 | /* Give I/O signals time to stabilize */ |
| 258 | udelay(IO_STABILIZATION_DELAY); |
| 259 | } |
| 260 | } |
| 261 | |
/*
 * Power down the fast (G) cluster partitions so later bring-up (and
 * RAM repair) starts from a known-off state.  The order — rail
 * (CRAIL), then non-CPU logic (C0NC), then CPU0 (CE0) — is part of the
 * power sequencing; do not reorder.
 */
void unpower_cpus(void)
{
	debug("%s entry: G cluster\n", __func__);

	/* Power down the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	unpower_partition(CRAIL);

	/* Power down the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	unpower_partition(C0NC);

	/* Power down the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	unpower_partition(CE0);

	debug("%s: done\n", __func__);
}
| 280 | |
Tom Warren | 32edd2e | 2014-01-24 12:46:14 -0700 | [diff] [blame] | 281 | static void power_partition(u32 partid) |
| 282 | { |
| 283 | struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE; |
| 284 | |
| 285 | debug("%s: part ID = %08X\n", __func__, partid); |
| 286 | /* Is the partition already on? */ |
| 287 | if (!is_partition_powered(partid)) { |
| 288 | /* No, toggle the partition power state (OFF -> ON) */ |
| 289 | debug("power_partition, toggling state\n"); |
| 290 | writel(START_CP | partid, &pmc->pmc_pwrgate_toggle); |
| 291 | |
| 292 | /* Wait for the power to come up */ |
| 293 | while (!is_partition_powered(partid)) |
| 294 | ; |
| 295 | |
| 296 | /* Give I/O signals time to stabilize */ |
| 297 | udelay(IO_STABILIZATION_DELAY); |
| 298 | } |
| 299 | } |
| 300 | |
/*
 * Power up the fast (G) cluster partitions in dependency order: rail
 * (CRAIL) first, then the non-CPU cluster logic (C0NC), then the CPU0
 * core partition (CE0).  Do not reorder.
 */
void powerup_cpus(void)
{
	/* We boot to the fast cluster */
	debug("%s entry: G cluster\n", __func__);

	/* Power up the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	power_partition(CRAIL);

	/* Power up the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	power_partition(C0NC);

	/* Power up the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	power_partition(CE0);

	debug("%s: done\n", __func__);
}
| 320 | |
/**
 * start_cpu() - bring up the main (G cluster) CPU complex.
 *
 * @reset_vector: address the CPU begins executing once its reset is
 *                removed; written to EXCEP_VECTOR_CPU_RESET_VECTOR
 *
 * Full cold-boot sequence: power the clusters off to a known state,
 * init clocks, raise the CPU rail, power partitions back up, run RAM
 * repair, enable CPU clocks, then release resets last so the CPU
 * starts at @reset_vector.  Ordering is significant.
 */
void start_cpu(u32 reset_vector)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry, reset_vector = %x\n", __func__, reset_vector);

	/*
	 * High power clusters are on after software reset,
	 * it may interfere with tegra124_ram_repair.
	 * unpower them.
	 */
	unpower_cpus();
	tegra124_init_clocks();

	/* Set power-gating timer multiplier */
	writel((MULT_8 << TIMER_MULT_SHIFT) | (MULT_8 << TIMER_MULT_CPU_SHIFT),
	       &pmc->pmc_pwrgate_timer_mult);

	enable_cpu_power_rail();
	powerup_cpus();
	tegra124_ram_repair();
	enable_cpu_clocks();
	clock_enable_coresight(1);
	/* Point the CPU reset exception vector at the entry code */
	writel(reset_vector, EXCEP_VECTOR_CPU_RESET_VECTOR);
	/* Release resets last: the CPU starts executing from here on */
	remove_cpu_resets();
	debug("%s exit, should continue @ reset_vector\n", __func__);
}