blob: bf85ad6fd9aa8b9fb1709e16fc97ac97ee26b9c0 [file] [log] [blame]
/*
 * Copyright (C) 2012-2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
6
#include <common.h>
#include <linux/io.h>
#include <asm/armv7.h>
#include <mach/ssc-regs.h>

#ifdef CONFIG_UNIPHIER_L2CACHE_ON
/*
 * Run a cache maintenance operation on the whole UniPhier system cache.
 *
 * @operation: SSCOQM_CM_* command code (e.g. invalidate or
 *             write-back + invalidate) to issue to the operation queue.
 *
 * Issues an "all range" command to the SSC operation queue, waits for it
 * to finish, then drains the controller's internal buffers.
 */
static void uniphier_cache_maint_all(u32 operation)
{
	/*
	 * try until the command is successfully set; SSCOPPQSEF reports
	 * a full/overflow error if the queue could not accept the command
	 */
	do {
		writel(SSCOQM_S_ALL | SSCOQM_CE | operation, SSCOQM);
	} while (readl(SSCOPPQSEF) & (SSCOPPQSEF_FE | SSCOPPQSEF_OE));

	/* wait until the operation is completed */
	while (readl(SSCOLPQS) != SSCOLPQS_EF)
		;

	/* clear the complete notification flag (write-one-to-clear) */
	writel(SSCOLPQS_EF, SSCOLPQS);

	writel(SSCOPE_CM_SYNC, SSCOPE); /* drain internal buffers */
	readl(SSCOPE); /* need a read back to confirm */
}
30
31void v7_outer_cache_flush_all(void)
32{
33 uniphier_cache_maint_all(SSCOQM_CM_WB_INV);
34}
35
36void v7_outer_cache_inval_all(void)
37{
38 uniphier_cache_maint_all(SSCOQM_CM_INV);
39}
40
/*
 * Issue a single address-range maintenance command to the system cache.
 *
 * @start:     physical start address of the range (cache-line aligned by
 *             the caller)
 * @size:      length of the range in bytes; must not exceed the
 *             controller's per-command maximum (caller chunks it)
 * @operation: SSCOQM_CM_* command code
 *
 * Note: the command register must be written before the address/size
 * registers; the whole triple is retried if the queue rejects it.
 */
static void __uniphier_cache_maint_range(u32 start, u32 size, u32 operation)
{
	/* try until the command is successfully set */
	do {
		writel(SSCOQM_S_ADDRESS | SSCOQM_CE | operation, SSCOQM);
		writel(start, SSCOQAD);
		writel(size, SSCOQSZ);

	} while (readl(SSCOPPQSEF) & (SSCOPPQSEF_FE | SSCOPPQSEF_OE));

	/* wait until the operation is completed */
	while (readl(SSCOLPQS) != SSCOLPQS_EF)
		;

	/* clear the complete notification flag (write-one-to-clear) */
	writel(SSCOLPQS_EF, SSCOLPQS);
}
58
/*
 * Run a cache maintenance operation on the range [start, end).
 *
 * @start:     physical start address (rounded down to a cache line here)
 * @end:       physical end address (exclusive; rounded up via the size
 *             computation below)
 * @operation: SSCOQM_CM_* command code
 *
 * The range is split into chunks no larger than the controller's
 * per-command limit, and the internal buffers are drained once at the end.
 */
static void uniphier_cache_maint_range(u32 start, u32 end, u32 operation)
{
	u32 size;

	/*
	 * If start address is not aligned to cache-line,
	 * do cache operation for the first cache-line
	 * (rounding start down pulls the whole first line into the range)
	 */
	start = start & ~(SSC_LINE_SIZE - 1);

	if (start == 0 && end >= (u32)(-SSC_LINE_SIZE)) {
		/*
		 * this means cache operation for all range;
		 * end is within one line of the 32-bit wrap-around point,
		 * so the size computation below would overflow
		 */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If end address is not aligned to cache-line,
	 * do cache operation for the last cache-line
	 * (round the byte count up to a whole number of lines)
	 */
	size = (end - start + SSC_LINE_SIZE - 1) & ~(SSC_LINE_SIZE - 1);

	/* issue the range in controller-sized chunks */
	while (size) {
		u32 chunk_size = size > SSC_RANGE_OP_MAX_SIZE ?
						SSC_RANGE_OP_MAX_SIZE : size;
		__uniphier_cache_maint_range(start, chunk_size, operation);

		start += chunk_size;
		size -= chunk_size;
	}

	writel(SSCOPE_CM_SYNC, SSCOPE); /* drain internal buffers */
	readl(SSCOPE); /* need a read back to confirm */
}
93
94void v7_outer_cache_flush_range(u32 start, u32 end)
95{
96 uniphier_cache_maint_range(start, end, SSCOQM_CM_WB_INV);
97}
98
99void v7_outer_cache_inval_range(u32 start, u32 end)
100{
101 uniphier_cache_maint_range(start, end, SSCOQM_CM_INV);
102}
103
104void v7_outer_cache_enable(void)
105{
106 u32 tmp;
107 tmp = readl(SSCC);
108 tmp |= SSCC_ON;
109 writel(tmp, SSCC);
110}
111#endif
112
113void v7_outer_cache_disable(void)
114{
115 u32 tmp;
116 tmp = readl(SSCC);
117 tmp &= ~SSCC_ON;
118 writel(tmp, SSCC);
119}
120
/* Board hook called by generic init code to switch the data cache on. */
void enable_caches(void)
{
	dcache_enable();
}