/*
 * Copyright (C) 2012-2014 Panasonic Corporation
 * Copyright (C) 2015-2016 Socionext Inc.
 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/armv7.h>
#include <asm/processor.h>

#include "cache-uniphier.h"
#include "ssc-regs.h"

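/*
 * Range operations must program an address and size (SSCOQAD/SSCOQSZ), and
 * way-targeted operations a way mask (SSCOQWN); these helpers tell which
 * case a given operation code falls into.
 */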
#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
#define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
		((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)

/* uniphier_cache_sync - perform a sync point for a particular cache level */
static void uniphier_cache_sync(void)
{
	/* drain internal buffers */
	writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl(UNIPHIER_SSCOPE);
}

/**
 * uniphier_cache_maint_common - run a queue operation
 *
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @ways: target ways (don't care for operations other than pre-fetch, touch)
 * @operation: flags to specify the desired cache operation
 */
static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
					u32 operation)
{
	/* clear the complete notification flag */
	writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);

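	/*
	 * Post the request to the operation queue; if the controller did
	 * not accept it (SSCOPPQSEF reports an error flag), submit again.
	 */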
	do {
		/* set cache operation */
		writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
			writel(start, UNIPHIER_SSCOQAD);
			writel(size, UNIPHIER_SSCOQSZ);
		}

		/* set target ways if needed */
		if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
			writel(ways, UNIPHIER_SSCOQWN);
	} while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
		cpu_relax();
}

static void uniphier_cache_maint_all(u32 operation)
{
	uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);

	uniphier_cache_sync();
}

static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
				       u32 operation)
{
	u32 size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);

	size = end - start;

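	/*
	 * (u32)(-UNIPHIER_SSC_LINE_SIZE) is the largest line-aligned 32-bit
	 * value; a size at or above it would overflow when rounded up to
	 * the line size, so operate on the whole cache instead.
	 */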
	if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
		/* this means cache operation for all range */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);

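	/*
	 * A single queued range operation covers at most
	 * UNIPHIER_SSC_RANGE_OP_MAX_SIZE bytes; issue larger ranges in
	 * chunks.
	 */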
	while (size) {
		u32 chunk_size = min_t(u32, size,
				       UNIPHIER_SSC_RANGE_OP_MAX_SIZE);

		uniphier_cache_maint_common(start, chunk_size, ways,
					    UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	uniphier_cache_sync();
}

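/*
 * The helpers below issue way-targeted operations (UNIPHIER_SSCOQM_TID_WAY);
 * only the cache ways selected by the @ways bitmap are affected.
 */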
void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_PREFETCH);
}

void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH);
}

void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
}

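/*
 * Illustrative sketch (hypothetical; not code used by this driver): a caller
 * could pin a zero-filled buffer into way 0 before DRAM is available, e.g.:
 *
 *	uniphier_cache_touch_zero_range(base, base + size, 0x1);
 *
 * where "base", "size", and the way mask 0x1 are example values only.
 */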
#ifdef CONFIG_UNIPHIER_L2CACHE_ON
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
}

void v7_outer_cache_inval_range(u32 start, u32 end)
{
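	/*
	 * An unaligned boundary shares its cache line with data outside the
	 * requested range, so flush (clean and invalidate) the boundary
	 * lines rather than invalidating them; otherwise neighboring dirty
	 * data would be lost.
	 */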
	if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		uniphier_cache_maint_range(start,
					   start + UNIPHIER_SSC_LINE_SIZE, 0,
					   UNIPHIER_SSCOQM_CM_FLUSH);
		start += UNIPHIER_SSC_LINE_SIZE;
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		uniphier_cache_maint_range(end,
					   end + UNIPHIER_SSC_LINE_SIZE, 0,
					   UNIPHIER_SSCOQM_CM_FLUSH);
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
}

void v7_outer_cache_enable(void)
{
	u32 tmp;

	writel(U32_MAX, UNIPHIER_SSCLPDAWCR);	/* activate all ways */
	tmp = readl(UNIPHIER_SSCC);
	tmp |= UNIPHIER_SSCC_ON;
	writel(tmp, UNIPHIER_SSCC);
}
#endif

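/*
 * Note: this is outside the #ifdef above, so it is built even when
 * CONFIG_UNIPHIER_L2CACHE_ON is not set.
 */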
void v7_outer_cache_disable(void)
{
	u32 tmp;

	tmp = readl(UNIPHIER_SSCC);
	tmp &= ~UNIPHIER_SSCC_ON;
	writel(tmp, UNIPHIER_SSCC);
}

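/* enable_caches() is the hook called from U-Boot's generic init sequence */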
void enable_caches(void)
{
	dcache_enable();
}