// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

/* for now: just dummy functions to satisfy the linker */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Flush range from all levels of d-cache/unified-cache.
 * Affects the range [start, start + size - 1].
 */
__weak void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
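/*
 * Example (illustrative sketch, not part of this file): a driver handing a
 * buffer to a DMA engine would flush it first, padding the length to a full
 * cache line so that an alignment check such as check_cache_range() below
 * is satisfied:
 *
 *	flush_cache((unsigned long)buf,
 *		    ALIGN(len, CONFIG_SYS_CACHELINE_SIZE));
 *
 * buf and len are hypothetical driver variables; ALIGN() and
 * CONFIG_SYS_CACHELINE_SIZE are the standard U-Boot helpers.
 */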

/*
 * Default implementation:
 * do a range flush for the entire range
 */
__weak void flush_dcache_all(void)
{
	flush_cache(0, ~0);
}

/*
 * Default implementation of enable_caches()
 * Real implementation should be in platform code
 */
__weak void enable_caches(void)
{
	puts("WARNING: Caches not enabled\n");
}
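/*
 * Example platform override (a sketch, assuming a typical ARM board):
 *
 *	void enable_caches(void)
 *	{
 *		icache_enable();
 *		dcache_enable();
 *	}
 *
 * icache_enable() and dcache_enable() are the standard U-Boot cache hooks;
 * a board with an outer (L2) cache would also set that up here.
 */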

__weak void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}
__weak void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}

int check_cache_range(unsigned long start, unsigned long stop)
{
	int ok = 1;

	if (start & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (!ok) {
		warn_non_spl("CACHE: Misaligned operation at range [%08lx, %08lx]\n",
			     start, stop);
	}

	return ok;
}
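/*
 * Example of a conforming invalidate (illustrative only): both bounds must
 * sit on a cache-line boundary, otherwise unrelated data sharing the first
 * or last line with the buffer could be lost:
 *
 *	invalidate_dcache_range((unsigned long)buf,
 *				(unsigned long)buf +
 *				ALIGN(len, CONFIG_SYS_CACHELINE_SIZE));
 *
 * This assumes buf itself was allocated cache-line aligned, e.g. with
 * memalign(CONFIG_SYS_CACHELINE_SIZE, ...).
 */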

#ifdef CONFIG_SYS_NONCACHED_MEMORY
/*
 * Reserve one MMU section worth of address space below the malloc() area that
 * will be mapped uncached.
 */
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;

void noncached_set_region(void)
{
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start,
					noncached_end - noncached_start,
					DCACHE_OFF);
#endif
}

void noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

	noncached_set_region();
}

phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return next;
}
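/*
 * Example use (a sketch, not from this file): a network driver carving a
 * descriptor ring out of the non-cached pool, so the CPU and the DMA engine
 * agree on its contents without explicit cache maintenance:
 *
 *	struct rx_desc *ring;
 *
 *	ring = (struct rx_desc *)(uintptr_t)
 *		noncached_alloc(RX_DESC_COUNT * sizeof(*ring),
 *				ARCH_DMA_MINALIGN);
 *	if (!ring)
 *		return -ENOMEM;
 *
 * struct rx_desc and RX_DESC_COUNT are hypothetical; ARCH_DMA_MINALIGN is
 * the standard U-Boot DMA alignment. Note this pool is a simple bump
 * allocator: allocations cannot be freed.
 */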
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();
}
#endif

int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}

__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record the allocated tlb_addr in case gd->arch.tlb_addr is later
	 * overwritten with a location within secure RAM.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif
#endif

	return 0;
}
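/*
 * Worked example (assuming PGTABLE_SIZE is 16 kB, as on ARMv7 where the
 * table holds 4096 section entries of 4 bytes each): with
 * gd->relocaddr = 0x8ff81234, subtracting the table gives 0x8ff7d234, and
 * rounding down to a 64 kB boundary places the table at
 * gd->arch.tlb_addr = 0x8ff70000.
 */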