// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

/* for now: just dummy functions to satisfy the linker */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <asm/cache.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Flush range from all levels of d-cache/unified-cache.
 * Affects the range [start, start + size - 1].
 */
__weak void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
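
/*
 * Example (illustrative; load_addr and image_size are placeholder names):
 * a loader typically flushes a freshly written image back to memory
 * before jumping to it:
 *
 *	flush_cache(load_addr, image_size);
 */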

/*
 * Default implementation:
 * do a range flush over the entire address space
 */
__weak void flush_dcache_all(void)
{
	flush_cache(0, ~0);
}

/*
 * Default implementation of enable_caches().
 * The real implementation should be in platform code.
 */
__weak void enable_caches(void)
{
	puts("WARNING: Caches not enabled\n");
}
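
/*
 * A typical board override (a sketch, assuming the platform provides
 * working icache_enable()/dcache_enable() implementations):
 *
 *	void enable_caches(void)
 *	{
 *		icache_enable();
 *		dcache_enable();
 *	}
 */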

__weak void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub; the real implementation should be in platform code */
}

__weak void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub; the real implementation should be in platform code */
}

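/*
 * check_cache_range() - check that a range is cache-line aligned
 *
 * Both @start and @stop must be multiples of CONFIG_SYS_CACHELINE_SIZE;
 * otherwise a flush or invalidate would also affect unrelated data
 * sharing the boundary cache lines.
 *
 * Return: 1 if the range is properly aligned, 0 if not (a warning is
 * printed in non-SPL builds).
 */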
int check_cache_range(unsigned long start, unsigned long stop)
{
	int ok = 1;

	if (start & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (!ok) {
		warn_non_spl("CACHE: Misaligned operation at range [%08lx, %08lx]\n",
			     start, stop);
	}

	return ok;
}
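
/*
 * Example caller (a sketch; buf and len are hypothetical names): a DMA
 * driver can guard its cache maintenance so that misaligned buffers are
 * reported instead of silently clobbering neighbouring data:
 *
 *	unsigned long start = (unsigned long)buf;
 *	unsigned long stop = start + roundup(len, ARCH_DMA_MINALIGN);
 *
 *	if (check_cache_range(start, stop))
 *		invalidate_dcache_range(start, stop);
 */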

#ifdef CONFIG_SYS_NONCACHED_MEMORY
/*
 * Reserve one MMU section worth of address space below the malloc() area
 * that will be mapped uncached.
 */
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;

void noncached_set_region(void)
{
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start,
					noncached_end - noncached_start,
					DCACHE_OFF);
#endif
}

void noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

	noncached_set_region();
}
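
/*
 * Worked example (illustrative numbers, assuming a 1 MiB MMU_SECTION_SIZE,
 * mem_malloc_start = 0x2ff00000 and CONFIG_SYS_NONCACHED_MEMORY = 1 MiB):
 *
 *	end   = ALIGN(0x2ff00000, 0x100000) - 0x100000 = 0x2fe00000
 *	size  = ALIGN(0x100000, 0x100000)              = 0x100000
 *	start = 0x2fe00000 - 0x100000                  = 0x2fd00000
 *
 * i.e. the uncached window [0x2fd00000, 0x2fe00000) ends one section
 * below the malloc() area.
 */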

phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return next;
}
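
/*
 * Example use (a sketch; struct dma_desc and RING_BYTES are hypothetical):
 * DMA descriptor rings are a typical client, since placing them in the
 * uncached window avoids flush/invalidate calls on every access:
 *
 *	struct dma_desc *ring;
 *
 *	ring = (struct dma_desc *)noncached_alloc(RING_BYTES,
 *						  ARCH_DMA_MINALIGN);
 *	if (!ring)
 *		return -ENOMEM;
 *
 * Note that noncached_alloc() returns 0 when the window is exhausted, so
 * callers must check the result; allocations are never freed.
 */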
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
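/*
 * Invalidate the entire L2 cache via a CP15 c15 maintenance operation.
 * Note: c15 is the implementation-defined register space, so this
 * encoding is CPU-specific rather than architecturally guaranteed.
 */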
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		     : : "r" (val) : "cc");
	isb();
}
#endif

int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}

__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to the next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

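	/*
	 * Worked example (illustrative values, assuming a 16 KiB
	 * PGTABLE_SIZE): relocaddr = 0x8ff7d123 becomes 0x8ff79123 after
	 * the subtraction above, and masking with ~0xffff leaves
	 * 0x8ff70000 as the 64 KiB aligned table address.
	 */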
	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record the allocated TLB address in case gd->arch.tlb_addr is
	 * later overwritten with a location inside secure RAM.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif
#endif

	return 0;
}