// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
 * Copyright (C) 2020 Marek Behun <marek.behun@nic.cz>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <fdtdec.h>
#include <fdt_support.h>
#include <init.h>
#include <asm/global_data.h>
#include <linux/bitops.h>
#include <linux/libfdt.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <asm/armv8/mmu.h>
#include <sort.h>

/* Armada 3700 */
#define MVEBU_GPIO_NB_REG_BASE		(MVEBU_REGISTER(0x13800))

#define MVEBU_TEST_PIN_LATCH_N		(MVEBU_GPIO_NB_REG_BASE + 0x8)
#define MVEBU_XTAL_MODE_MASK		BIT(9)
#define MVEBU_XTAL_MODE_OFFS		9
#define MVEBU_XTAL_CLOCK_25MHZ		0x0
#define MVEBU_XTAL_CLOCK_40MHZ		0x1

#define MVEBU_NB_WARM_RST_REG		(MVEBU_GPIO_NB_REG_BASE + 0x40)
#define MVEBU_NB_WARM_RST_MAGIC_NUM	0x1d1e

/* Armada 3700 CPU Address Decoder registers */
#define MVEBU_CPU_DEC_WIN_REG_BASE	(size_t)(MVEBU_REGISTER(0xcf00))
#define MVEBU_CPU_DEC_WIN_CTRL(w) \
	(MVEBU_CPU_DEC_WIN_REG_BASE + ((w) << 4))
#define MVEBU_CPU_DEC_WIN_CTRL_EN	BIT(0)
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_MASK	0xf
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_OFFS	4
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM	0
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE	2
#define MVEBU_CPU_DEC_WIN_SIZE(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0x4)
#define MVEBU_CPU_DEC_WIN_BASE(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0x8)
#define MVEBU_CPU_DEC_WIN_REMAP(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0xc)
#define MVEBU_CPU_DEC_WIN_GRANULARITY	16
#define MVEBU_CPU_DEC_WINS		5

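/*
 * One static entry for the SOC registers region, up to MVEBU_CPU_DEC_WINS
 * decoded windows, plus a zero-sized list terminator.
 */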
#define MAX_MEM_MAP_REGIONS		(MVEBU_CPU_DEC_WINS + 2)

#define A3700_PTE_BLOCK_NORMAL \
	(PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE)
#define A3700_PTE_BLOCK_DEVICE \
	(PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE)

DECLARE_GLOBAL_DATA_PTR;

static struct mm_region mvebu_mem_map[MAX_MEM_MAP_REGIONS] = {
	{
		/*
		 * SRAM, MMIO regions
		 * Don't remove this entry, build_mem_map() needs it.
		 */
		.phys = SOC_REGS_PHY_BASE,
		.virt = SOC_REGS_PHY_BASE,
		.size = 0x02000000UL,	/* 32MiB internal registers */
		.attrs = A3700_PTE_BLOCK_DEVICE
	},
};

struct mm_region *mem_map = mvebu_mem_map;

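/*
 * Read one CPU Address Decoder window. Returns -1 if the window is disabled,
 * otherwise 0 while optionally filling in target, base and size (base and
 * size registers are programmed in 64 KiB granularity).
 */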
static int get_cpu_dec_win(int win, u32 *tgt, u32 *base, u32 *size)
{
	u32 reg;

	reg = readl(MVEBU_CPU_DEC_WIN_CTRL(win));
	if (!(reg & MVEBU_CPU_DEC_WIN_CTRL_EN))
		return -1;

	if (tgt) {
		reg >>= MVEBU_CPU_DEC_WIN_CTRL_TGT_OFFS;
		reg &= MVEBU_CPU_DEC_WIN_CTRL_TGT_MASK;
		*tgt = reg;
	}

	if (base) {
		reg = readl(MVEBU_CPU_DEC_WIN_BASE(win));
		*base = reg << MVEBU_CPU_DEC_WIN_GRANULARITY;
	}

	if (size) {
		/*
		 * Window size is encoded as the number of 1s from LSB to MSB,
		 * followed by 0s. The number of 1s specifies the size in 64 KiB
		 * granularity.
		 */
		reg = readl(MVEBU_CPU_DEC_WIN_SIZE(win));
		*size = ((reg + 1) << MVEBU_CPU_DEC_WIN_GRANULARITY);
	}

	return 0;
}

/*
 * Builds mem_map according to CPU Address Decoder settings, which were set by
 * the TIMH image on the Cortex-M3 secure processor, or by ARM Trusted Firmware.
 */
static void build_mem_map(void)
{
	int win, region;

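	/* index 0 is the static SOC registers entry; windows fill from 1 */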
	region = 1;
	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;
		u64 attrs;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			attrs = A3700_PTE_BLOCK_NORMAL;
		else if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE)
			attrs = A3700_PTE_BLOCK_DEVICE;
		else
			/* skip windows with other targets */
			continue;

		mvebu_mem_map[region].phys = base;
		mvebu_mem_map[region].virt = base;
		mvebu_mem_map[region].size = size;
		mvebu_mem_map[region].attrs = attrs;
		++region;
	}

	/* add list terminator */
	mvebu_mem_map[region].size = 0;
	mvebu_mem_map[region].attrs = 0;
}

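/*
 * build_mem_map() must run before dcache_enable(), which sets up the MMU
 * translation tables from mem_map.
 */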
void enable_caches(void)
{
	build_mem_map();

	icache_enable();
	dcache_enable();
}

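/*
 * Compute the total RAM size by summing up the sizes of all enabled DRAM
 * windows in the CPU Address Decoder.
 */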
int a3700_dram_init(void)
{
	int win;

	gd->ram_size = 0;
	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		/* skip non-DRAM windows */
		if (tgt != MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			continue;

		/*
		 * It is possible that one image was built for boards with
		 * different RAM sizes, for example 512 MiB and 1 GiB.
		 * We therefore try to determine the actual RAM size in the
		 * window with get_ram_size.
		 */
		gd->ram_size += get_ram_size((void *)(size_t)base, size);
	}

	return 0;
}

struct a3700_dram_window {
	size_t base, size;
};

static int dram_win_cmp(const void *a, const void *b)
{
	size_t ab, bb;

	ab = ((const struct a3700_dram_window *)a)->base;
	bb = ((const struct a3700_dram_window *)b)->base;

	if (ab < bb)
		return -1;
	else if (ab > bb)
		return 1;
	else
		return 0;
}

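/*
 * Fill gd->bd->bi_dram[] from the enabled DRAM windows, merging windows that
 * are adjacent in the physical address space into a single bank.
 */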
int a3700_dram_init_banksize(void)
{
	struct a3700_dram_window dram_wins[MVEBU_CPU_DEC_WINS];
	int bank, win, ndram_wins;
	u32 last_end;
	size_t size;

	ndram_wins = 0;
	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		/* skip non-DRAM windows */
		if (tgt != MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			continue;

		dram_wins[ndram_wins].base = base;
		dram_wins[ndram_wins].size = size;
		++ndram_wins;
	}

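	/* sort windows by base address so neighbours can be merged below */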
	qsort(dram_wins, ndram_wins, sizeof(dram_wins[0]), dram_win_cmp);

	bank = 0;
	last_end = -1;

	for (win = 0; win < ndram_wins; ++win) {
		/* again determine the actual RAM size as in a3700_dram_init */
		size = get_ram_size((void *)dram_wins[win].base,
				    dram_wins[win].size);

		/*
		 * Check if the previous window ends where the current one
		 * starts. If so, merge the two windows into one "bank". This
		 * simple check is sufficient because dram_wins was sorted by
		 * base address above.
		 */
		if (last_end == dram_wins[win].base) {
			gd->bd->bi_dram[bank - 1].size += size;
			last_end += size;
		} else {
			if (bank == CONFIG_NR_DRAM_BANKS) {
				printf("Need more CONFIG_NR_DRAM_BANKS\n");
				return -ENOBUFS;
			}

			gd->bd->bi_dram[bank].start = dram_wins[win].base;
			gd->bd->bi_dram[bank].size = size;
			last_end = dram_wins[win].base + size;
			++bank;
		}
	}

	/*
	 * If there are more DRAM bank slots than needed, fill the rest with
	 * zeros.
	 */
	for (; bank < CONFIG_NR_DRAM_BANKS; ++bank) {
		gd->bd->bi_dram[bank].start = 0;
		gd->bd->bi_dram[bank].size = 0;
	}

	return 0;
}

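/*
 * Return the base address of the first enabled PCIe window, or -1 (as u32)
 * if no PCIe window is configured.
 */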
static u32 find_pcie_window_base(void)
{
	int win;

	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, NULL))
			continue;

		if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE)
			return base;
	}

	return -1;
}

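/* Update the idx-th u32 cell of a property in place (blob size unchanged). */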
static int fdt_setprop_inplace_u32_partial(void *blob, int node,
					   const char *name,
					   u32 idx, u32 val)
{
	val = cpu_to_fdt32(val);

	return fdt_setprop_inplace_namelen_partial(blob, node, name,
						   strlen(name),
						   idx * sizeof(u32),
						   &val, sizeof(u32));
}

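/*
 * Patch the address cells in the PCIe controller's "ranges" property so that
 * the child and parent addresses match the PCIe window that is actually
 * configured in the CPU Address Decoder.
 */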
int a3700_fdt_fix_pcie_regions(void *blob)
{
	int acells, pacells, scells;
	u32 base, fix_offset;
	const u32 *ranges;
	int node, pnode;
	int ret, i, len;

	base = find_pcie_window_base();
	if (base == -1)
		return -ENOENT;

	node = fdt_node_offset_by_compatible(blob, -1, "marvell,armada-3700-pcie");
	if (node < 0)
		return node;

	ranges = fdt_getprop(blob, node, "ranges", &len);
	if (!ranges || len % sizeof(u32))
		return -ENOENT;

	/*
	 * The "ranges" property is an array of
	 * { <child address> <parent address> <size in child address space> }
	 *
	 * All 3 elements can span a different number of cells. Fetch their
	 * sizes.
	 */
	pnode = fdt_parent_offset(blob, node);
	acells = fdt_address_cells(blob, node);
	pacells = fdt_address_cells(blob, pnode);
	scells = fdt_size_cells(blob, node);

	/* Child PCI addresses always use 3 cells */
	if (acells != 3)
		return -ENOENT;

	/* Calculate fixup offset from first child address (in last cell) */
	fix_offset = base - fdt32_to_cpu(ranges[2]);

	/*
	 * Fix the address (last cell) of each child address and each parent
	 * address
	 */
	for (i = 0; i < len / sizeof(u32); i += acells + pacells + scells) {
		int idx;

		/* fix child address */
		idx = i + acells - 1;
		ret = fdt_setprop_inplace_u32_partial(blob, node, "ranges", idx,
						      fdt32_to_cpu(ranges[idx]) +
						      fix_offset);
		if (ret)
			return ret;

		/* fix parent address */
		idx = i + acells + pacells - 1;
		ret = fdt_setprop_inplace_u32_partial(blob, node, "ranges", idx,
						      fdt32_to_cpu(ranges[idx]) +
						      fix_offset);
		if (ret)
			return ret;
	}

	return 0;
}

void reset_cpu(void)
{
	/*
	 * Write the magic number 0x1d1e to the North Bridge Warm Reset
	 * register to trigger a warm reset.
	 */
	writel(MVEBU_NB_WARM_RST_MAGIC_NUM, MVEBU_NB_WARM_RST_REG);
}

/*
 * get_ref_clk
 *
 * return: reference clock in MHz (25 or 40)
 */
u32 get_ref_clk(void)
{
	u32 regval;

	regval = (readl(MVEBU_TEST_PIN_LATCH_N) & MVEBU_XTAL_MODE_MASK) >>
		MVEBU_XTAL_MODE_OFFS;

	if (regval == MVEBU_XTAL_CLOCK_25MHZ)
		return 25;
	else
		return 40;
}