// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>
#include <asm/armv7_mpu.h>

#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_SYS_ARM_MMU
__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

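/*
 * Write one section (or LPAE block) descriptor into the translation table
 * at gd->arch.tlb_addr: map section index @section to the physical address
 * @phys with the cache attributes selected by @option.
 */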
static void set_section_phys(int section, phys_addr_t phys,
			     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= phys;

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

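/*
 * Identity mapping: section index @section covers the physical address
 * @section << MMU_SECTION_SHIFT.
 */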
void set_section_dcache(int section, enum dcache_option option)
{
	set_section_phys(section, (u32)section << MMU_SECTION_SHIFT, option);
}

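/*
 * Weak default that only reports that nothing was flushed. CPU-specific
 * code is expected to override this with a routine that cleans the
 * modified table entries from the data cache and invalidates the TLB.
 */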
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

void mmu_set_region_dcache_behaviour_phys(phys_addr_t start, phys_addr_t phys,
					  size_t size, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	/* div by 2 before start + size to avoid phys_addr_t overflow */
	end = ALIGN((start / 2) + (size / 2), MMU_SECTION_SIZE / 2)
	      >> (MMU_SECTION_SHIFT - 1);
	start = start >> MMU_SECTION_SHIFT;

#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++, phys += MMU_SECTION_SIZE)
		set_section_phys(upto, phys, option);

	/*
	 * Make sure range is cache line aligned
	 * Only CPU maintains page tables, hence it is safe to always
	 * flush complete cache lines...
	 */

	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	mmu_set_region_dcache_behaviour_phys(start, start, size, option);
}

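/*
 * Default per-bank MMU setup: mark every section of DRAM bank @bank as
 * cacheable using DCACHE_DEFAULT_OPTION. Boards that need different
 * attributes for parts of their RAM can override this __weak function.
 */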
__weak void dram_bank_mmu_setup(int bank)
{
	struct bd_info *bd = gd->bd;
	int i;

	/* bd->bi_dram is available only after relocation */
	if ((gd->flags & GD_FLG_RELOC) == 0)
		return;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++)
		set_section_dcache(i, DCACHE_DEFAULT_OPTION);
}

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7A)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}
#endif /* CONFIG_SYS_ARM_MMU */

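/*
 * Enable one cache. For the data cache this first makes sure address
 * translation is available: with an MMU the page tables are built and the
 * MMU is turned on; with an MPU the request is refused until the MPU has
 * been enabled.
 */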
/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu/mpu is enabled too */
#ifdef CONFIG_SYS_ARM_MMU
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
#elif defined(CONFIG_SYS_ARM_MPU)
	if ((cache_bit == CR_C) && !mpu_enabled()) {
		printf("Consider enabling MPU before enabling caches\n");
		return;
	}
#endif
	reg = get_cr();	/* get control reg. */
	set_cr(reg | cache_bit);
}

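/*
 * Disable one cache. Turning off the data cache flushes all dirty lines
 * first and, when an MMU is in use, disables the MMU along with it, so no
 * cached data is lost once CR_C is cleared.
 */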
/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if cache isn't enabled no need to disable */
		if ((reg & CR_C) != CR_C)
			return;
#ifdef CONFIG_SYS_ARM_MMU
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
#endif
	}
	reg = get_cr();

#ifdef CONFIG_SYS_ARM_MMU
	if (cache_bit == (CR_C | CR_M))
#elif defined(CONFIG_SYS_ARM_MPU)
	if (cache_bit == CR_C)
#endif
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

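/*
 * With SYS_ICACHE_OFF the instruction cache support is compiled out:
 * enable/disable become no-ops and icache_status() always reports "off".
 */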
#if CONFIG_IS_ENABLED(SYS_ICACHE_OFF)
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;					/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

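/*
 * Likewise for SYS_DCACHE_OFF: the data cache calls are stubbed out and
 * dcache_status() always reports "off".
 */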
#if CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;					/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif