/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level)
 *
 * clean and invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 flush & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	add	w4, w3, w3
	sub	w4, w4, 1		/* round up log2(#ways + 1) */
	clz	w5, w4			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

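	/*
	 * ccsidr_el1 fields used above: LineSize in bits [2:0]
	 * (log2(words per line) - 2), Associativity - 1 in bits [12:3],
	 * NumSets - 1 in bits [27:13].  E.g. ccsidr_el1 = 0x1fe01a
	 * decodes to 64-byte lines, 4 ways, 256 sets: a 64 KB data
	 * cache.
	 */
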
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)

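/*
 * Rough C equivalent of the set/way walk above (a sketch only:
 * way_shift and line_shift correspond to x5 and x2, and dc_isw()/
 * dc_cisw() stand in for the "dc isw"/"dc cisw" instructions):
 *
 *	for (set = num_sets - 1; set >= 0; set--)
 *		for (way = num_ways - 1; way >= 0; way--) {
 *			u64 sw = (level << 1) | ((u64)way << way_shift) |
 *				 ((u64)set << line_shift);
 *			invalidate_only ? dc_isw(sw) : dc_cisw(sw);
 *		}
 */
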
/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 flush & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

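	/*
	 * clidr_el1 packs one 3-bit Ctype field per level (Ctype1 in
	 * bits [2:0], Ctype2 in bits [5:3], ...) and LoC in bits
	 * [26:24].  Ctype values: 0 no cache, 1 icache only, 2 dcache
	 * only, 3 separate i/d, 4 unified; a Ctype below 2 has nothing
	 * to maintain by set/way, hence the "cmp/b.lt skip" below.
	 */
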
loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_flush_dcache_level /* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)

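/*
 * void __asm_flush_dcache_all(void)
 *
 * clean & invalidate all data cache by SET/WAY.  lr is saved in x16
 * because __asm_dcache_all uses x15 and clobbers lr itself.
 */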
ENTRY(__asm_flush_dcache_all)
	mov	x16, lr
	mov	x0, #0
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_flush_dcache_all)

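/*
 * void __asm_invalidate_dcache_all(void)
 *
 * invalidate all data cache by SET/WAY.  Only bit 0 of the argument
 * is tested in __asm_flush_dcache_level, so any odd value selects
 * invalidate-only.
 */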
ENTRY(__asm_invalidate_dcache_all)
	mov	x16, lr
	mov	x0, #0xffff
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_invalidate_dcache_all)

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range; start is rounded down
 * to a cache line boundary, end is exclusive.
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */
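	/*
	 * ctr_el0.DminLine (bits [19:16]) is log2 of the smallest data
	 * cache line in words; e.g. DminLine = 4 gives 4 << 4 = 64-byte
	 * lines.
	 */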

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries, inner shareable.
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
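
/*
 * For reference, the C-side declarations these entry points pair with
 * (a sketch; the exact prototypes live in U-Boot's asm/system.h and
 * may differ between trees):
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 *	void __asm_flush_dcache_range(u64 start, u64 end);
 *	void __asm_invalidate_icache_all(void);
 */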