// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2003
 * Wolfgang Denk, DENX Software Engineering, <wd@denx.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cm.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <linux/bug.h>

DECLARE_GLOBAL_DATA_PTR;

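/*
 * probe_l2() - determine the L2 (secondary) cache line size.
 *
 * On MIPSr6 the Config2..Config5 presence chain is walked first: if
 * Config5.L2C is set, the L2 configuration is held by the Coherence
 * Manager and the line size is read via mips_cm_l2_line_size().
 * Otherwise the line size is decoded from the Config2 SL field, with 0
 * meaning no L2 cache. The result is stored in gd->arch.l2_line_size.
 */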
static void probe_l2(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	unsigned long conf2, sl;
	bool l2c = false;

	if (!(read_c0_config1() & MIPS_CONF_M))
		return;

	conf2 = read_c0_config2();

	if (__mips_isa_rev >= 6) {
		l2c = conf2 & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config3() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config4() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config5() & MIPS_CONF5_L2C;
	}

	if (l2c && IS_ENABLED(CONFIG_MIPS_CM)) {
		gd->arch.l2_line_size = mips_cm_l2_line_size();
	} else if (l2c) {
		/* We don't know how to retrieve L2 config on this system */
		BUG();
	} else {
		sl = (conf2 & MIPS_CONF2_SL) >> MIPS_CONF2_SL_SHF;
		gd->arch.l2_line_size = sl ? (2 << sl) : 0;
	}
#endif
}

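/*
 * mips_cache_probe() - probe the L1 & L2 cache line sizes.
 *
 * The L1 line sizes are decoded from the Config1 IL/DL fields: a field
 * value of n means a line size of 2 << n bytes, while 0 means the cache
 * is not present. The results are stored in gd->arch for use by the
 * cache maintenance routines below.
 */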
void mips_cache_probe(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	unsigned long conf1, il, dl;

	conf1 = read_c0_config1();

	il = (conf1 & MIPS_CONF1_IL) >> MIPS_CONF1_IL_SHF;
	dl = (conf1 & MIPS_CONF1_DL) >> MIPS_CONF1_DL_SHF;

	gd->arch.l1i_line_size = il ? (2 << il) : 0;
	gd->arch.l1d_line_size = dl ? (2 << dl) : 0;
#endif
	probe_l2();
}

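/*
 * When CONFIG_SYS_CACHE_SIZE_AUTO (or CONFIG_MIPS_L2_CACHE for the L2)
 * is enabled, the line sizes probed above are returned; otherwise the
 * compile-time CONFIG_SYS_*_LINE_SIZE values apply. A line size of 0
 * makes the cache maintenance loops below no-ops.
 */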
static inline unsigned long icache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1i_line_size;
#else
	return CONFIG_SYS_ICACHE_LINE_SIZE;
#endif
}

static inline unsigned long dcache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1d_line_size;
#else
	return CONFIG_SYS_DCACHE_LINE_SIZE;
#endif
}

static inline unsigned long scache_line_size(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	return gd->arch.l2_line_size;
#else
	return CONFIG_SYS_SCACHE_LINE_SIZE;
#endif
}

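/*
 * cache_loop() - apply one or more cache ops to every line in a range.
 *
 * start and (end - 1) are rounded down to line-size boundaries, then
 * each of the given ops is issued for every cache line covering
 * [start, end). If the line size is 0 (cache absent) the loop does
 * nothing.
 */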
#define cache_loop(start, end, lsize, ops...) do {			\
	const void *addr = (const void *)(start & ~(lsize - 1));	\
	const void *aend = (const void *)((end - 1) & ~(lsize - 1));	\
	const unsigned int cache_ops[] = { ops };			\
	unsigned int i;							\
									\
	if (!lsize)							\
		break;							\
									\
	for (; addr <= aend; addr += lsize) {				\
		for (i = 0; i < ARRAY_SIZE(cache_ops); i++)		\
			mips_cache(cache_ops[i], addr);			\
	}								\
} while (0)

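/*
 * flush_cache() - write back & invalidate the range in the D-cache (and
 * L2 if present) and invalidate it in the I-cache, e.g. after loading
 * code into memory. When the L1 I & D line sizes match and there is no
 * L2, a single pass issues both ops per line.
 */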
void __weak flush_cache(ulong start_addr, ulong size)
{
	unsigned long ilsize = icache_line_size();
	unsigned long dlsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (size == 0)
		return;

	if ((ilsize == dlsize) && !slsize) {
		/* flush I-cache & D-cache simultaneously */
		cache_loop(start_addr, start_addr + size, ilsize,
			   HIT_WRITEBACK_INV_D, HIT_INVALIDATE_I);
		goto ops_done;
	}

	/* flush D-cache */
	cache_loop(start_addr, start_addr + size, dlsize, HIT_WRITEBACK_INV_D);

	/* flush L2 cache */
	cache_loop(start_addr, start_addr + size, slsize, HIT_WRITEBACK_INV_SD);

	/* flush I-cache */
	cache_loop(start_addr, start_addr + size, ilsize, HIT_INVALIDATE_I);

ops_done:
	/* ensure cache ops complete before any further memory accesses */
	sync();

	/* ensure the pipeline doesn't contain now-invalid instructions */
	instruction_hazard_barrier();
}

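/*
 * flush_dcache_range() - write back & invalidate [start_addr, stop) in
 * the D-cache and L2, e.g. before a device reads the buffer via DMA.
 */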
void __weak flush_dcache_range(ulong start_addr, ulong stop)
{
	unsigned long lsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (start_addr == stop)
		return;

	cache_loop(start_addr, stop, lsize, HIT_WRITEBACK_INV_D);

	/* flush L2 cache */
	cache_loop(start_addr, stop, slsize, HIT_WRITEBACK_INV_SD);

	/* ensure cache ops complete before any further memory accesses */
	sync();
}

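/*
 * invalidate_dcache_range() - invalidate [start_addr, stop) in the L2
 * and D-cache without writing dirty lines back, e.g. before reading a
 * buffer that a device has written via DMA.
 */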
void __weak invalidate_dcache_range(ulong start_addr, ulong stop)
{
	unsigned long lsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (start_addr == stop)
		return;

	/* invalidate L2 cache */
	cache_loop(start_addr, stop, slsize, HIT_INVALIDATE_SD);

	cache_loop(start_addr, stop, lsize, HIT_INVALIDATE_D);

	/* ensure cache ops complete before any further memory accesses */
	sync();
}

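/*
 * The data cache is controlled through the kseg0 cacheability & coherency
 * attribute (CCA) field of the CP0 Config register: dcache_status()
 * reports whether kseg0 is currently cached, and dcache_disable()
 * switches it to uncached. Re-enabling the cache at runtime is not
 * supported.
 */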
int dcache_status(void)
{
	unsigned int cca = read_c0_config() & CONF_CM_CMASK;

	return cca != CONF_CM_UNCACHED;
}

void dcache_enable(void)
{
	puts("Not supported!\n");
}

void dcache_disable(void)
{
	/* change CCA to uncached */
	change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);

	/* ensure the pipeline doesn't contain now-invalid instructions */
	instruction_hazard_barrier();
}

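/*
 * Non-cached memory support: noncached_init() carves a
 * CONFIG_SYS_NONCACHED_MEMORY sized, MMU_SECTION_SIZE aligned region out
 * of the space just below the malloc area, and noncached_alloc() hands
 * out chunks of it as KSEG1 (uncached, unmapped) addresses, so no page
 * table setup is required.
 */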
#ifdef CONFIG_SYS_NONCACHED_MEMORY
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;

void noncached_set_region(void)
{
}

int noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

	return 0;
}

phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return CKSEG1ADDR(next);
}
#endif /* CONFIG_SYS_NONCACHED_MEMORY */