/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 */

#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H

#include <asm/system.h>

#ifndef CONFIG_ARM64

/*
 * Invalidate L2 Cache using co-proc instruction
 */
#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
void invalidate_l2_cache(void);
#else
static inline void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		     : : "r" (val) : "cc");
	isb();
}
#endif

/* Check that a cache maintenance range is cache-line aligned */
int check_cache_range(unsigned long start, unsigned long stop);

void l2_cache_enable(void);
void l2_cache_disable(void);
/* Set the page-table entry for one MMU section to the given cache option */
void set_section_dcache(int section, enum dcache_option option);

/* Hooks that CPU or board specific code can override during MMU/cache setup */
void arm_init_before_mmu(void);
void arm_init_domains(void);
void cpu_cache_initialization(void);
void dram_bank_mmu_setup(int bank);

#endif

/*
 * The line size of the largest data cache relevant to DMA operations is
 * set for us in CONFIG_SYS_CACHELINE_SIZE. In some cases this may be a
 * larger value than found in the L1 cache, but it is still safe to use
 * for alignment.
 */
#define ARCH_DMA_MINALIGN	CONFIG_SYS_CACHELINE_SIZE

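/*
 * Illustrative sketch (not part of the original header): one way a driver
 * might use ARCH_DMA_MINALIGN so that a DMA buffer does not share cache
 * lines with unrelated data. U-Boot drivers would normally use the
 * cache-aligned buffer helpers from <memalign.h> rather than open-coding
 * the attribute; the guard macro below is hypothetical and never defined.
 */
#ifdef _ASM_CACHE_H_EXAMPLE_ONLY
static unsigned char example_dma_buf[512]
		__attribute__((aligned(ARCH_DMA_MINALIGN)));
#endif
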
#endif /* _ASM_CACHE_H */