blob: d23b38d6b93fa1804b90e75811a0ab025d03d677 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Paul Burton30374f92015-01-29 01:27:57 +00002/*
3 * (C) Copyright 2003
4 * Wolfgang Denk, DENX Software Engineering, <wd@denx.de>
Paul Burton30374f92015-01-29 01:27:57 +00005 */
6
7#include <common.h>
Simon Glass9edefc22019-11-14 12:57:37 -07008#include <cpu_func.h>
Weijie Gao2948d9cf2022-05-20 11:21:51 +08009#include <malloc.h>
Simon Glass90526e92020-05-10 11:39:56 -060010#include <asm/cache.h>
Paul Burton30374f92015-01-29 01:27:57 +000011#include <asm/cacheops.h>
Paul Burton4baa0ab2016-09-21 11:18:54 +010012#include <asm/cm.h>
Simon Glass401d1c42020-10-30 21:38:53 -060013#include <asm/global_data.h>
Paul Burton219c2db2017-11-21 11:18:37 -080014#include <asm/io.h>
Paul Burton30374f92015-01-29 01:27:57 +000015#include <asm/mipsregs.h>
Paul Burtond8b32692017-11-21 11:18:38 -080016#include <asm/system.h>
Simon Glasseb41d8a2020-05-10 11:40:08 -060017#include <linux/bug.h>
Paul Burton30374f92015-01-29 01:27:57 +000018
Paul Burton8cb48172016-09-21 11:18:48 +010019DECLARE_GLOBAL_DATA_PTR;
Paul Burton37228622016-05-27 14:28:05 +010020
/*
 * probe_l2() - detect the L2 cache & record its line size.
 *
 * Only does anything when CONFIG_MIPS_L2_CACHE is enabled. On MIPSr6 the
 * L2 configuration moved out of the Config2 SL field, so the chain of
 * Config registers must be walked to discover whether an L2 is present.
 * The detected line size (0 if no L2 cache) is stored in
 * gd->arch.l2_line_size.
 */
static void probe_l2(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	unsigned long conf2, sl;
	bool l2c = false;

	/* Config2 only exists if Config1 sets the M(ore registers) bit */
	if (!(read_c0_config1() & MIPS_CONF_M))
		return;

	conf2 = read_c0_config2();

	if (__mips_isa_rev >= 6) {
		/*
		 * r6+: walk Config2..Config4 M bits; L2 presence is
		 * flagged by the L2C bit in Config5.
		 */
		l2c = conf2 & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config3() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config4() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config5() & MIPS_CONF5_L2C;
	}

	if (l2c && IS_ENABLED(CONFIG_MIPS_CM)) {
		/* r6 L2 config is held by the Coherence Manager */
		gd->arch.l2_line_size = mips_cm_l2_line_size();
	} else if (l2c) {
		/* We don't know how to retrieve L2 config on this system */
		BUG();
	} else {
		/* pre-r6: line size is encoded in the Config2 SL field */
		sl = (conf2 & MIPS_CONF2_SL) >> MIPS_CONF2_SL_SHF;
		gd->arch.l2_line_size = sl ? (2 << sl) : 0;
	}
#endif
}
53
/*
 * mips_cache_probe() - discover cache line sizes and cache them in gd.
 *
 * With CONFIG_SYS_CACHE_SIZE_AUTO the L1 I- & D-cache line sizes are
 * decoded from the Config1 register (an encoding of 0 means the cache is
 * absent). Afterwards the L2 cache, if any, is probed.
 */
void mips_cache_probe(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	unsigned long cfg1 = read_c0_config1();
	unsigned long i_enc, d_enc;

	i_enc = (cfg1 & MIPS_CONF1_IL) >> MIPS_CONF1_IL_SHF;
	d_enc = (cfg1 & MIPS_CONF1_DL) >> MIPS_CONF1_DL_SHF;

	/* line size = 2 << encoding; 0 encodes "no cache" */
	gd->arch.l1i_line_size = i_enc ? (2 << i_enc) : 0;
	gd->arch.l1d_line_size = d_enc ? (2 << d_enc) : 0;
#endif
	probe_l2();
}
69
/*
 * icache_line_size() - L1 I-cache line size in bytes (0 = no I-cache).
 *
 * Returns the probed value from global data when auto-detection is
 * enabled, otherwise the compile-time configured size.
 */
static inline unsigned long icache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1i_line_size;
#else
	return CONFIG_SYS_ICACHE_LINE_SIZE;
#endif
}
78
/*
 * dcache_line_size() - L1 D-cache line size in bytes (0 = no D-cache).
 *
 * Returns the probed value from global data when auto-detection is
 * enabled, otherwise the compile-time configured size.
 */
static inline unsigned long dcache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1d_line_size;
#else
	return CONFIG_SYS_DCACHE_LINE_SIZE;
#endif
}
87
/*
 * scache_line_size() - L2 (secondary) cache line size in bytes (0 = none).
 *
 * Returns the value probed by probe_l2() when CONFIG_MIPS_L2_CACHE is
 * enabled, otherwise the compile-time configured size.
 */
static inline unsigned long scache_line_size(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	return gd->arch.l2_line_size;
#else
	return CONFIG_SYS_SCACHE_LINE_SIZE;
#endif
}
96
/*
 * cache_loop() - apply one or more cache ops to every line in a range.
 * @start: start address; rounded down to a line boundary
 * @end:   exclusive end address; the line holding (end - 1) is included
 * @lsize: cache line size in bytes; 0 means "no such cache" and the whole
 *         macro is a no-op (the break exits the enclosing do/while(0))
 * @ops:   cache op codes, applied in order to each line via mips_cache()
 */
#define cache_loop(start, end, lsize, ops...) do {			\
	const void *addr = (const void *)(start & ~(lsize - 1));	\
	const void *aend = (const void *)((end - 1) & ~(lsize - 1));	\
	const unsigned int cache_ops[] = { ops };			\
	unsigned int i;							\
									\
	if (!lsize)							\
		break;							\
									\
	for (; addr <= aend; addr += lsize) {				\
		for (i = 0; i < ARRAY_SIZE(cache_ops); i++)		\
			mips_cache(cache_ops[i], addr);			\
	}								\
} while (0)
111
Stefan Roese1d4ba152020-05-14 11:59:04 +0200112void __weak flush_cache(ulong start_addr, ulong size)
Paul Burton30374f92015-01-29 01:27:57 +0000113{
114 unsigned long ilsize = icache_line_size();
115 unsigned long dlsize = dcache_line_size();
Paul Burton4baa0ab2016-09-21 11:18:54 +0100116 unsigned long slsize = scache_line_size();
Paul Burton30374f92015-01-29 01:27:57 +0000117
118 /* aend will be miscalculated when size is zero, so we return here */
119 if (size == 0)
120 return;
121
Paul Burton4baa0ab2016-09-21 11:18:54 +0100122 if ((ilsize == dlsize) && !slsize) {
Paul Burton30374f92015-01-29 01:27:57 +0000123 /* flush I-cache & D-cache simultaneously */
Paul Burtonfb64cda2016-05-27 14:28:06 +0100124 cache_loop(start_addr, start_addr + size, ilsize,
125 HIT_WRITEBACK_INV_D, HIT_INVALIDATE_I);
Paul Burton219c2db2017-11-21 11:18:37 -0800126 goto ops_done;
Paul Burton30374f92015-01-29 01:27:57 +0000127 }
128
129 /* flush D-cache */
Paul Burtonfb64cda2016-05-27 14:28:06 +0100130 cache_loop(start_addr, start_addr + size, dlsize, HIT_WRITEBACK_INV_D);
Paul Burton30374f92015-01-29 01:27:57 +0000131
Paul Burton4baa0ab2016-09-21 11:18:54 +0100132 /* flush L2 cache */
Paul Burtoncc4f3642017-11-21 11:18:39 -0800133 cache_loop(start_addr, start_addr + size, slsize, HIT_WRITEBACK_INV_SD);
Paul Burton4baa0ab2016-09-21 11:18:54 +0100134
Paul Burton30374f92015-01-29 01:27:57 +0000135 /* flush I-cache */
Paul Burtonfb64cda2016-05-27 14:28:06 +0100136 cache_loop(start_addr, start_addr + size, ilsize, HIT_INVALIDATE_I);
Paul Burton219c2db2017-11-21 11:18:37 -0800137
138ops_done:
139 /* ensure cache ops complete before any further memory accesses */
140 sync();
Paul Burtond8b32692017-11-21 11:18:38 -0800141
142 /* ensure the pipeline doesn't contain now-invalid instructions */
143 instruction_hazard_barrier();
Paul Burton30374f92015-01-29 01:27:57 +0000144}
145
Alex Nemirovskyebdc2782019-12-23 20:19:20 +0000146void __weak flush_dcache_range(ulong start_addr, ulong stop)
Paul Burton30374f92015-01-29 01:27:57 +0000147{
148 unsigned long lsize = dcache_line_size();
Paul Burton4baa0ab2016-09-21 11:18:54 +0100149 unsigned long slsize = scache_line_size();
Paul Burton30374f92015-01-29 01:27:57 +0000150
Marek Vasutfbb0de02016-01-27 03:13:59 +0100151 /* aend will be miscalculated when size is zero, so we return here */
152 if (start_addr == stop)
153 return;
154
Paul Burtonfb64cda2016-05-27 14:28:06 +0100155 cache_loop(start_addr, stop, lsize, HIT_WRITEBACK_INV_D);
Paul Burton4baa0ab2016-09-21 11:18:54 +0100156
157 /* flush L2 cache */
Paul Burtoncc4f3642017-11-21 11:18:39 -0800158 cache_loop(start_addr, stop, slsize, HIT_WRITEBACK_INV_SD);
Paul Burton219c2db2017-11-21 11:18:37 -0800159
160 /* ensure cache ops complete before any further memory accesses */
161 sync();
Paul Burton30374f92015-01-29 01:27:57 +0000162}
163
Stefan Roese60a05592020-06-30 12:33:19 +0200164void __weak invalidate_dcache_range(ulong start_addr, ulong stop)
Paul Burton30374f92015-01-29 01:27:57 +0000165{
166 unsigned long lsize = dcache_line_size();
Paul Burton4baa0ab2016-09-21 11:18:54 +0100167 unsigned long slsize = scache_line_size();
Paul Burton30374f92015-01-29 01:27:57 +0000168
Marek Vasutfbb0de02016-01-27 03:13:59 +0100169 /* aend will be miscalculated when size is zero, so we return here */
170 if (start_addr == stop)
171 return;
172
Paul Burton4baa0ab2016-09-21 11:18:54 +0100173 /* invalidate L2 cache */
Paul Burtoncc4f3642017-11-21 11:18:39 -0800174 cache_loop(start_addr, stop, slsize, HIT_INVALIDATE_SD);
Paul Burton4baa0ab2016-09-21 11:18:54 +0100175
Paul Burtona95800e2016-06-09 13:09:51 +0100176 cache_loop(start_addr, stop, lsize, HIT_INVALIDATE_D);
Paul Burton219c2db2017-11-21 11:18:37 -0800177
178 /* ensure cache ops complete before any further memory accesses */
179 sync();
Paul Burton30374f92015-01-29 01:27:57 +0000180}
Daniel Schwierzeck2f85c2b2018-09-07 19:02:03 +0200181
182int dcache_status(void)
183{
184 unsigned int cca = read_c0_config() & CONF_CM_CMASK;
185 return cca != CONF_CM_UNCACHED;
186}
187
/* Re-enabling the D-cache at runtime is not implemented for MIPS */
void dcache_enable(void)
{
	puts("Not supported!\n");
}
192
/*
 * dcache_disable() - disable caching by switching kseg0 to uncached.
 */
void dcache_disable(void)
{
	/* change CCA to uncached */
	change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);

	/* ensure the pipeline doesn't contain now-invalid instructions */
	instruction_hazard_barrier();
}
Weijie Gao2948d9cf2022-05-20 11:21:51 +0800201
#ifdef CONFIG_SYS_NONCACHED_MEMORY
/* Physical bounds of the carved-out non-cached region */
static unsigned long noncached_start;
static unsigned long noncached_end;
/* Next free physical address within the region; bumped by noncached_alloc() */
static unsigned long noncached_next;

/*
 * No MMU/TLB work is needed here: noncached_alloc() hands out addresses
 * through KSEG1, which is permanently uncached on MIPS.
 */
void noncached_set_region(void)
{
}
210
211int noncached_init(void)
212{
213 phys_addr_t start, end;
214 size_t size;
215
216 /* If this calculation changes, update board_f.c:reserve_noncached() */
217 end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
218 size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
219 start = end - size;
220
221 debug("mapping memory %pa-%pa non-cached\n", &start, &end);
222
223 noncached_start = start;
224 noncached_end = end;
225 noncached_next = start;
226
227 return 0;
228}
229
230phys_addr_t noncached_alloc(size_t size, size_t align)
231{
232 phys_addr_t next = ALIGN(noncached_next, align);
233
234 if (next >= noncached_end || (noncached_end - next) < size)
235 return 0;
236
237 debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
238 noncached_next = next + size;
239
240 return CKSEG1ADDR(next);
241}
242#endif /* CONFIG_SYS_NONCACHED_MEMORY */