/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Cache-handling routines for MIPS CPUs
 *
 * Copyright (c) 2003 Wolfgang Denk <wd@denx.de>
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/cacheops.h>
#include <asm/cm.h>

#ifndef CONFIG_SYS_MIPS_CACHE_MODE
#define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
#endif

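/*
 * f_fill64 stores \val to 64 bytes of memory starting at \dst + \offset:
 * eight LONG_S stores when LONGSIZE == 8 (64-bit builds), sixteen when
 * LONGSIZE == 4 (32-bit builds).
 */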
	.macro f_fill64 dst, offset, val
	LONG_S	\val, (\offset + 0 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 1 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 2 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 3 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 4 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 5 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 6 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 7 * LONGSIZE)(\dst)
#if LONGSIZE == 4
	LONG_S	\val, (\offset + 8 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 9 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 10 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 11 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 12 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 13 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 14 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 15 * LONGSIZE)(\dst)
#endif
	.endm

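/*
 * cache_loop applies the cache operation \op to every \line_sz-byte line
 * in the address range [\curr, \end), advancing \curr as it goes.
 */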
	.macro cache_loop	curr, end, line_sz, op
10:	cache		\op, 0(\curr)
	PTR_ADDU	\curr, \curr, \line_sz
	bne		\curr, \end, 10b
	.endm

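/*
 * l1_info probes Config1 to determine the size and line size of one of the
 * L1 caches; \off selects the I-cache (MIPS_CONF1_IA_SHF) or D-cache
 * (MIPS_CONF1_DA_SHF) field group. The values computed below are:
 *
 *   line_sz = 2 << <IL/DL>	(a line size field of zero means no cache)
 *   sz = line_sz * (<IA/DA> + 1) * (32 << ((<IS/DS> + 1) & 0x7))
 */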
	.macro l1_info	sz, line_sz, off
	.set	push
	.set	noat

	mfc0	$1, CP0_CONFIG, 1

	/* detect line size */
	srl	\line_sz, $1, \off + MIPS_CONF1_DL_SHF - MIPS_CONF1_DA_SHF
	andi	\line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHF)
	move	\sz, zero
	beqz	\line_sz, 10f
	li	\sz, 2
	sllv	\line_sz, \sz, \line_sz

	/* detect associativity */
	srl	\sz, $1, \off + MIPS_CONF1_DA_SHF - MIPS_CONF1_DA_SHF
	andi	\sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHF)
	addiu	\sz, \sz, 1

	/* sz *= line_sz */
	mul	\sz, \sz, \line_sz

	/* detect log32(sets) */
	srl	$1, $1, \off + MIPS_CONF1_DS_SHF - MIPS_CONF1_DA_SHF
	andi	$1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHF)
	addiu	$1, $1, 1
	andi	$1, $1, 0x7

	/* sz <<= log32(sets) */
	sllv	\sz, \sz, $1

	/* sz *= 32 */
	li	$1, 32
	mul	\sz, \sz, $1
10:
	.set	pop
	.endm

/*
 * mips_cache_reset - low level initialisation of the primary caches
 *
 * This routine initialises the primary caches to ensure that they have good
 * parity. It must be called by the ROM before any cached locations are used
 * to prevent the possibility of data with bad parity being written to memory.
 *
 * To initialise the instruction cache it is essential that a source of data
 * with good parity is available. This routine will initialise an area of
 * memory starting at location zero to be used as a source of parity.
 *
 * Note that this function does not follow the standard calling convention &
 * may clobber typically callee-saved registers.
 *
 * RETURNS: N/A
 *
 */
#define R_RETURN	s0
#define R_IC_SIZE	s1
#define R_IC_LINE	s2
#define R_DC_SIZE	s3
#define R_DC_LINE	s4
#define R_L2_SIZE	s5
#define R_L2_LINE	s6
#define R_L2_BYPASSED	s7
#define R_L2_L2C	t8
LEAF(mips_cache_reset)
	move	R_RETURN, ra

#ifdef CONFIG_MIPS_L2_CACHE
	/*
	 * For there to be an L2 present, Config2 must be present. If it isn't
	 * then we proceed knowing there's no L2 cache.
	 */
	move	R_L2_SIZE, zero
	move	R_L2_LINE, zero
	move	R_L2_BYPASSED, zero
	move	R_L2_L2C, zero
	mfc0	t0, CP0_CONFIG, 1
	bgez	t0, l2_probe_done

	/*
	 * From MIPSr6 onwards the L2 cache configuration might not be reported
	 * by Config2. The Config5.L2C bit indicates whether this is the case,
	 * and if it is then we need knowledge of where else to look. For cores
	 * from Imagination Technologies this is a CM GCR.
	 */
# if __mips_isa_rev >= 6
	/* Check that Config5 exists */
	mfc0	t0, CP0_CONFIG, 2
	bgez	t0, l2_probe_cop0
	mfc0	t0, CP0_CONFIG, 3
	bgez	t0, l2_probe_cop0
	mfc0	t0, CP0_CONFIG, 4
	bgez	t0, l2_probe_cop0

	/* Check Config5.L2C is set */
	mfc0	t0, CP0_CONFIG, 5
	and	R_L2_L2C, t0, MIPS_CONF5_L2C
	beqz	R_L2_L2C, l2_probe_cop0

	/* Config5.L2C is set */
# ifdef CONFIG_MIPS_CM
	/* The CM will provide L2 configuration */
	PTR_LI	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw	t1, GCR_L2_CONFIG(t0)
	bgez	t1, l2_probe_done

	ext	R_L2_LINE, t1, \
		GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
	beqz	R_L2_LINE, l2_probe_done
	li	t2, 2
	sllv	R_L2_LINE, t2, R_L2_LINE

	ext	t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
	addiu	t2, t2, 1
	mul	R_L2_SIZE, R_L2_LINE, t2

	ext	t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
	sllv	R_L2_SIZE, R_L2_SIZE, t2
	li	t2, 64
	mul	R_L2_SIZE, R_L2_SIZE, t2
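	/* R_L2_SIZE = line size * associativity * sets, with sets = 64 << SETSZ */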

	/* Bypass the L2 cache so that we can init the L1s early */
	or	t1, t1, GCR_L2_CONFIG_BYPASS
	sw	t1, GCR_L2_CONFIG(t0)
	sync
	li	R_L2_BYPASSED, 1

	/* Zero the L2 tag registers */
	sw	zero, GCR_L2_TAG_ADDR(t0)
	sw	zero, GCR_L2_TAG_ADDR_UPPER(t0)
	sw	zero, GCR_L2_TAG_STATE(t0)
	sw	zero, GCR_L2_TAG_STATE_UPPER(t0)
	sw	zero, GCR_L2_DATA(t0)
	sw	zero, GCR_L2_DATA_UPPER(t0)
	sync
# else
	/* We don't know how to retrieve L2 configuration on this system */
# endif
	b	l2_probe_done
# endif

	/*
	 * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
	 * cache configuration from the cop0 Config2 register.
	 */
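	/*
	 * Config2 encodes the L2 parameters as: line size = 2 << SL (zero
	 * meaning no L2 is present), associativity = SA + 1 and sets per
	 * way = 64 << SS, which is what the code below computes.
	 */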
l2_probe_cop0:
	mfc0	t0, CP0_CONFIG, 2

	srl	R_L2_LINE, t0, MIPS_CONF2_SL_SHF
	andi	R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
	beqz	R_L2_LINE, l2_probe_done
	li	t1, 2
	sllv	R_L2_LINE, t1, R_L2_LINE

	srl	t1, t0, MIPS_CONF2_SA_SHF
	andi	t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
	addiu	t1, t1, 1
	mul	R_L2_SIZE, R_L2_LINE, t1

	srl	t1, t0, MIPS_CONF2_SS_SHF
	andi	t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
	sllv	R_L2_SIZE, R_L2_SIZE, t1
	li	t1, 64
	mul	R_L2_SIZE, R_L2_SIZE, t1

	/* Attempt to bypass the L2 so that we can init the L1s early */
	or	t0, t0, MIPS_CONF2_L2B
	mtc0	t0, CP0_CONFIG, 2
	ehb
	mfc0	t0, CP0_CONFIG, 2
	and	R_L2_BYPASSED, t0, MIPS_CONF2_L2B
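	/* R_L2_BYPASSED is non-zero only if the core actually honoured L2B */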

	/* Zero the L2 tag registers */
	mtc0	zero, CP0_TAGLO, 4
	ehb
l2_probe_done:
#endif

#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
	li	R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
	li	R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
#else
	l1_info	R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
#endif

#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
	li	R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
	li	R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
#else
	l1_info	R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
#endif

#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD

	/* Determine the largest L1 cache size */
#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
#if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
	li	v0, CONFIG_SYS_ICACHE_SIZE
#else
	li	v0, CONFIG_SYS_DCACHE_SIZE
#endif
#else
	move	v0, R_IC_SIZE
	sltu	t1, R_IC_SIZE, R_DC_SIZE
	movn	v0, R_DC_SIZE, t1
#endif
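	/* v0 now holds the larger of the two L1 cache sizes */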
	/*
	 * Now clear that much memory starting from zero.
	 */
	PTR_LI	a0, CKSEG1ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU a1, a0, v0
2:	PTR_ADDIU a0, 64
	f_fill64 a0, -64, zero
	bne	a0, a1, 2b

#endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */

#ifdef CONFIG_MIPS_L2_CACHE
	/*
	 * If the L2 is bypassed, init the L1 first so that we can execute the
	 * rest of the cache initialisation using the L1 instruction cache.
	 */
	bnez	R_L2_BYPASSED, l1_init

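	/* Write the zeroed tag state to every line of the L2 */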
l2_init:
	PTR_LI	t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU t1, t0, R_L2_SIZE
1:	cache	INDEX_STORE_TAG_SD, 0(t0)
	PTR_ADDU t0, t0, R_L2_LINE
	bne	t0, t1, 1b

	/*
	 * If the L2 was bypassed then we already initialised the L1s before
	 * the L2, so we are now done.
	 */
	bnez	R_L2_BYPASSED, l2_unbypass
#endif

	/*
	 * The TagLo registers used depend upon the CPU implementation, but the
	 * architecture requires that it is safe for software to write to both
	 * TagLo selects 0 & 2 covering supported cases.
	 */
l1_init:
	mtc0	zero, CP0_TAGLO
	mtc0	zero, CP0_TAGLO, 2
	ehb

	/*
	 * The caches are probably in an indeterminate state, so we force good
	 * parity into them by doing an invalidate for each line. If
	 * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
	 * perform a load/fill & a further invalidate for each line, assuming
	 * that the bottom of RAM (having just been cleared) will generate good
	 * parity for the cache.
	 */

	/*
	 * Initialize the I-cache first,
	 */
	blez	R_IC_SIZE, 1f
	PTR_LI	t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU t1, t0, R_IC_SIZE
	/* clear tag to invalidate */
	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* fill once, so data field parity is correct */
	PTR_LI	t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_IC_LINE, FILL
	/* invalidate again - prudent but not strictly necessary */
	PTR_LI	t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#endif
	sync

	/*
	 * Enable use of the I-cache by setting Config.K0. The code for this
	 * must be executed from KSEG1. Jump from KSEG0 to KSEG1 to do this.
	 * Jump back to KSEG0 after caches are enabled and insert an
	 * instruction hazard barrier.
	 */
	PTR_LA	t0, change_k0_cca
	li	t1, CPHYSADDR(~0)
	and	t0, t0, t1
	PTR_LI	t1, CKSEG1
	or	t0, t0, t1
	li	a0, CONFIG_SYS_MIPS_CACHE_MODE
	jalr.hb	t0

	/*
	 * then initialize D-cache.
	 */
1:	blez	R_DC_SIZE, 3f
	PTR_LI	t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU t1, t0, R_DC_SIZE
	/* clear all tags */
	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* load from each line (in cached space) */
	PTR_LI	t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
2:	LONG_L	zero, 0(t0)
	PTR_ADDU t0, R_DC_LINE
	bne	t0, t1, 2b
	/* clear all tags */
	PTR_LI	t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#endif
3:

#ifdef CONFIG_MIPS_L2_CACHE
	/* If the L2 isn't bypassed then we're done */
	beqz	R_L2_BYPASSED, return

	/* The L2 is bypassed - go initialise it */
	b	l2_init

l2_unbypass:
# if __mips_isa_rev >= 6
	beqz	R_L2_L2C, 1f

	li	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw	t1, GCR_L2_CONFIG(t0)
	xor	t1, t1, GCR_L2_CONFIG_BYPASS
	sw	t1, GCR_L2_CONFIG(t0)
	sync
	ehb
	b	2f
# endif
1:	mfc0	t0, CP0_CONFIG, 2
	xor	t0, t0, MIPS_CONF2_L2B
	mtc0	t0, CP0_CONFIG, 2
	ehb

2:
# ifdef CONFIG_MIPS_CM
	/* Config3 must exist for a CM to be present */
	mfc0	t0, CP0_CONFIG, 1
	bgez	t0, 2f
	mfc0	t0, CP0_CONFIG, 2
	bgez	t0, 2f

	/* Check Config3.CMGCR to determine CM presence */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_CMGCR
	beqz	t0, 2f

	/* Change Config.K0 to a coherent CCA */
	PTR_LA	t0, change_k0_cca
	li	a0, CONF_CM_CACHABLE_COW
	jalr	t0

	/*
	 * Join the coherent domain such that the caches of this core are kept
	 * coherent with those of other cores.
	 */
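	/*
	 * CM 3 and later use the single coherence enable bit; older CM
	 * revisions take the coherence domain enable value instead.
	 */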
	PTR_LI	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw	t1, GCR_REV(t0)
	li	t2, GCR_REV_CM3
	li	t3, GCR_Cx_COHERENCE_EN
	bge	t1, t2, 1f
	li	t3, GCR_Cx_COHERENCE_DOM_EN
1:	sw	t3, GCR_Cx_COHERENCE(t0)
	ehb
2:
# endif
#endif

return:
	/* Ensure all cache operations complete before returning */
	sync
	jr	R_RETURN
END(mips_cache_reset)

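/*
 * change_k0_cca - change the cache coherency algorithm (CCA) used for kseg0
 *
 * The new CCA is passed in a0 and written into the K0 field (the low 3 bits)
 * of the Config register. The jr.hb used to return doubles as the
 * instruction hazard barrier required after modifying Config.
 */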
LEAF(change_k0_cca)
	mfc0	t0, CP0_CONFIG
#if __mips_isa_rev >= 2
	ins	t0, a0, 0, 3
#else
	xor	a0, a0, t0
	andi	a0, a0, CONF_CM_CMASK
	xor	a0, a0, t0
#endif
	mtc0	a0, CP0_CONFIG

	jr.hb	ra
END(change_k0_cca)