Tom Rini | 83d290c | 2018-05-06 17:58:06 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 2 | /* |
| 3 | * (C) Copyright 2002 |
| 4 | * Wolfgang Denk, DENX Software Engineering, wd@denx.de. |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 5 | */ |
| 6 | |
| 7 | /* for now: just dummy functions to satisfy the linker */ |
| 8 | |
wdenk | 8ed9604 | 2005-01-09 23:16:25 +0000 | [diff] [blame] | 9 | #include <common.h> |
Simon Glass | 1eb69ae | 2019-11-14 12:57:39 -0700 | [diff] [blame] | 10 | #include <cpu_func.h> |
Thierry Reding | 1dfdd9b | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 11 | #include <malloc.h> |
wdenk | 8ed9604 | 2005-01-09 23:16:25 +0000 | [diff] [blame] | 12 | |
Ovidiu Panait | 586b15b | 2020-03-29 20:57:39 +0300 | [diff] [blame] | 13 | DECLARE_GLOBAL_DATA_PTR; |
| 14 | |
Wu, Josh | 633b6cc | 2015-07-27 11:40:17 +0800 | [diff] [blame] | 15 | /* |
| 16 | * Flush range from all levels of d-cache/unified-cache. |
| 17 | * Affects the range [start, start + size - 1]. |
| 18 | */ |
Jeroen Hofstee | fcfddfd | 2014-06-23 22:07:04 +0200 | [diff] [blame] | 19 | __weak void flush_cache(unsigned long start, unsigned long size) |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 20 | { |
Wu, Josh | 633b6cc | 2015-07-27 11:40:17 +0800 | [diff] [blame] | 21 | flush_dcache_range(start, start + size); |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 22 | } |
Aneesh V | e05f007 | 2011-06-16 23:30:50 +0000 | [diff] [blame] | 23 | |
/*
 * Default implementation:
 * do a range flush for the entire range
 *
 * Passing (0, ~0) makes flush_cache() cover the whole address space;
 * platforms with a cheaper flush-all instruction override this weak symbol.
 */
__weak void flush_dcache_all(void)
{
	flush_cache(0, ~0);
}
Aneesh V | cba4b18 | 2011-08-16 04:33:05 +0000 | [diff] [blame] | 32 | |
/*
 * Default implementation of enable_caches()
 * Real implementation should be in platform code
 *
 * Boards that have not wired up cache setup fall through to this weak
 * stub, which only warns so the omission is visible on the console.
 */
__weak void enable_caches(void)
{
	puts("WARNING: Caches not enabled\n");
}
Thierry Reding | 1dfdd9b | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 41 | |
/*
 * Invalidate d-cache lines for [start, stop).
 * An empty stub, real implementation should be in platform code
 */
__weak void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}
/*
 * Flush (clean + invalidate) d-cache lines for [start, stop).
 * An empty stub, real implementation should be in platform code
 */
__weak void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}
| 50 | |
Simon Glass | 397b569 | 2016-06-19 19:43:01 -0600 | [diff] [blame] | 51 | int check_cache_range(unsigned long start, unsigned long stop) |
| 52 | { |
| 53 | int ok = 1; |
| 54 | |
| 55 | if (start & (CONFIG_SYS_CACHELINE_SIZE - 1)) |
| 56 | ok = 0; |
| 57 | |
| 58 | if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1)) |
| 59 | ok = 0; |
| 60 | |
| 61 | if (!ok) { |
Simon Glass | bcc53bf | 2016-06-19 19:43:05 -0600 | [diff] [blame] | 62 | warn_non_spl("CACHE: Misaligned operation at range [%08lx, %08lx]\n", |
| 63 | start, stop); |
Simon Glass | 397b569 | 2016-06-19 19:43:01 -0600 | [diff] [blame] | 64 | } |
| 65 | |
| 66 | return ok; |
| 67 | } |
| 68 | |
Thierry Reding | 1dfdd9b | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 69 | #ifdef CONFIG_SYS_NONCACHED_MEMORY |
| 70 | /* |
| 71 | * Reserve one MMU section worth of address space below the malloc() area that |
| 72 | * will be mapped uncached. |
| 73 | */ |
| 74 | static unsigned long noncached_start; |
| 75 | static unsigned long noncached_end; |
| 76 | static unsigned long noncached_next; |
| 77 | |
/*
 * (Re)apply the uncached MMU mapping for the reserved region.
 *
 * Split out from noncached_init() so platforms can restore the mapping,
 * e.g. after the MMU tables have been rebuilt. No-op when the d-cache
 * is configured off.
 */
void noncached_set_region(void)
{
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start,
					noncached_end - noncached_start,
					DCACHE_OFF);
#endif
}
| 86 | |
Thierry Reding | 1dfdd9b | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 87 | void noncached_init(void) |
| 88 | { |
| 89 | phys_addr_t start, end; |
| 90 | size_t size; |
| 91 | |
Stephen Warren | 5e0404f | 2019-08-27 11:54:31 -0600 | [diff] [blame] | 92 | /* If this calculation changes, update board_f.c:reserve_noncached() */ |
Thierry Reding | 1dfdd9b | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 93 | end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE; |
| 94 | size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE); |
| 95 | start = end - size; |
| 96 | |
| 97 | debug("mapping memory %pa-%pa non-cached\n", &start, &end); |
| 98 | |
| 99 | noncached_start = start; |
| 100 | noncached_end = end; |
| 101 | noncached_next = start; |
| 102 | |
Patrice Chotard | c2a2123 | 2020-04-28 11:38:03 +0200 | [diff] [blame^] | 103 | noncached_set_region(); |
Thierry Reding | 1dfdd9b | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 104 | } |
| 105 | |
| 106 | phys_addr_t noncached_alloc(size_t size, size_t align) |
| 107 | { |
| 108 | phys_addr_t next = ALIGN(noncached_next, align); |
| 109 | |
| 110 | if (next >= noncached_end || (noncached_end - next) < size) |
| 111 | return 0; |
| 112 | |
| 113 | debug("allocated %zu bytes of uncached memory @%pa\n", size, &next); |
| 114 | noncached_next = next + size; |
| 115 | |
| 116 | return next; |
| 117 | } |
| 118 | #endif /* CONFIG_SYS_NONCACHED_MEMORY */ |
Albert ARIBAUD | 62e9207 | 2015-10-23 18:06:40 +0200 | [diff] [blame] | 119 | |
#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
/*
 * Invalidate the entire L2 cache via the CP15 c15 cache-operation
 * register (mcr p15, 1, <Rd>, c15, c11, 0). Provided here for Thumb
 * builds; the isb() ensures the invalidate completes before continuing.
 * NOTE(review): this encoding is implementation-defined per-core — verify
 * against the CPU's TRM when reusing.
 */
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();
}
#endif
Ovidiu Panait | 586b15b | 2020-03-29 20:57:39 +0300 | [diff] [blame] | 130 | |
/*
 * Generic arch hook for reserving MMU table space during relocation;
 * forwards to the (weak, board-overridable) ARM implementation.
 */
int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}
| 135 | |
/*
 * Reserve space for the ARM translation table just below the current
 * relocation address. Adjusts gd->relocaddr downward, so the statement
 * order here is significant. Skipped entirely when both caches are
 * configured off. Always returns 0.
 */
__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record the allocated tlb_addr in case gd->arch.tlb_addr is later
	 * overwritten with a location inside secure RAM.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif
#endif

	return 0;
}