/*
 * Copyright (C) 2012-2014 Panasonic Corporation
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/armv7.h>
#include <asm/processor.h>

#include "cache-uniphier.h"
#include "ssc-regs.h"

#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
#define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
		((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)

/* uniphier_cache_sync - perform a sync point for a particular cache level */
static void uniphier_cache_sync(void)
{
	/* drain internal buffers */
	writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl(UNIPHIER_SSCOPE);
}

/**
 * uniphier_cache_maint_common - run a queue operation
 *
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @ways: target ways (don't care for operations other than pre-fetch, touch)
 * @operation: flags to specify the desired cache operation
 */
static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
					u32 operation)
{
	/* clear the complete notification flag */
	writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
			writel(start, UNIPHIER_SSCOQAD);
			writel(size, UNIPHIER_SSCOQSZ);
		}

		/* set target ways if needed */
		if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
			writel(ways, UNIPHIER_SSCOQWN);
	} while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
		cpu_relax();
}
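
/*
 * Illustrative example (hypothetical address and size, not a call made in
 * this file): flushing a 4 KiB buffer at 0x80000000 would be queued as
 *
 *	uniphier_cache_maint_common(0x80000000, 0x1000, 0,
 *				    UNIPHIER_SSCOQM_S_RANGE |
 *				    UNIPHIER_SSCOQM_CM_FLUSH);
 *
 * followed by uniphier_cache_sync(); uniphier_cache_maint_range() below
 * wraps exactly this pattern and also splits large ranges into chunks.
 */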

static void uniphier_cache_maint_all(u32 operation)
{
	uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);

	uniphier_cache_sync();
}

static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
				       u32 operation)
{
	u32 size;

	/*
	 * If the start address is not aligned, round it down so that the
	 * operation covers the whole of the first cache-line
	 */
	start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);

	size = end - start;

	if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
		/* the range is too large; operate on the entire cache */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If the end address is not aligned, round the size up so that the
	 * operation covers the whole of the last cache-line
	 */
	size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);

	while (size) {
		u32 chunk_size = min_t(u32, size,
				       UNIPHIER_SSC_RANGE_OP_MAX_SIZE);

		uniphier_cache_maint_common(start, chunk_size, ways,
					    UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	uniphier_cache_sync();
}
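
/*
 * Worked example (assuming a 128-byte SSC line): a request for
 * [0x1010, 0x2030) is widened to the line-aligned window [0x1000, 0x2080):
 * start is rounded down to 0x1000, and the resulting size 0x1030 is rounded
 * up to 0x1080; the window is then queued in chunks of at most
 * UNIPHIER_SSC_RANGE_OP_MAX_SIZE bytes.
 */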

void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_PREFETCH);
}

void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH);
}

void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
}
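
/*
 * Usage sketch (hypothetical addresses and way mask, not a call made in
 * this file): touch-zero with an explicit way mask can pin zero-filled
 * lines into a chosen way, e.g. to use part of the SSC as scratch memory
 * before DRAM is available:
 *
 *	uniphier_cache_touch_zero_range(0x30000000, 0x30010000, 0x1);
 */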

static void uniphier_cache_endisable(int enable)
{
	u32 tmp;

	tmp = readl(UNIPHIER_SSCC);
	if (enable)
		tmp |= UNIPHIER_SSCC_ON;
	else
		tmp &= ~UNIPHIER_SSCC_ON;
	writel(tmp, UNIPHIER_SSCC);
}

void uniphier_cache_enable(void)
{
	uniphier_cache_endisable(1);
}

void uniphier_cache_disable(void)
{
	uniphier_cache_endisable(0);
}

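/*
 * The v7_outer_cache_*() functions below override the weak stubs in
 * U-Boot's generic ARMv7 cache code, which calls them alongside the
 * inner (L1) cache maintenance operations.
 */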
#ifdef CONFIG_UNIPHIER_L2CACHE_ON
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
}

void v7_outer_cache_inval_range(u32 start, u32 end)
{
	if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		/* flush the first, partially covered cache-line */
		uniphier_cache_maint_range(start,
					   start + UNIPHIER_SSC_LINE_SIZE, 0,
					   UNIPHIER_SSCOQM_CM_FLUSH);
		start += UNIPHIER_SSC_LINE_SIZE;
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		/* flush the last, partially covered cache-line */
		uniphier_cache_maint_range(end, end + UNIPHIER_SSC_LINE_SIZE,
					   0, UNIPHIER_SSCOQM_CM_FLUSH);
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
}
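
/*
 * Worked example (assuming a 128-byte SSC line): invalidating
 * [0x1010, 0x2030) must not discard neighbouring data in the partially
 * covered lines, so the head line [0x1000, 0x1080) and the tail line
 * [0x2000, 0x2080) are flushed, and only the fully covered middle part
 * [0x1080, 0x2000) is actually invalidated.
 */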

void v7_outer_cache_enable(void)
{
	writel(U32_MAX, UNIPHIER_SSCLPDAWCR);	/* activate all ways */
	uniphier_cache_enable();
}

void v7_outer_cache_disable(void)
{
	uniphier_cache_disable();
}
#endif
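
/*
 * enable_caches() overrides the weak hook in U-Boot's generic ARM cache
 * code; the init sequence calls it to turn the data cache on for this
 * platform.
 */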
void enable_caches(void)
{
	dcache_enable();
}