blob: a4c99f17e7cc29e3595a488d13b1e313d0716ced [file] [log] [blame]
Lokesh Vutlaa3501a42018-11-02 19:51:05 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * K3: Common Architecture initialization
4 *
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
6 * Lokesh Vutla <lokeshvutla@ti.com>
7 */
8
9#include <common.h>
Simon Glass9a3b4ce2019-12-28 10:45:01 -070010#include <cpu_func.h>
Lokesh Vutlaa3501a42018-11-02 19:51:05 +053011#include <spl.h>
12#include "common.h"
13#include <dm.h>
14#include <remoteproc.h>
Lokesh Vutla6ce424a2019-03-08 11:47:33 +053015#include <linux/soc/ti/ti_sci_protocol.h>
Lokesh Vutlaa9a84482019-03-08 11:47:34 +053016#include <fdt_support.h>
Andreas Dannenbergf9380a72019-06-07 19:24:42 +053017#include <asm/arch/sys_proto.h>
Lokesh Vutlaf8ca9122019-09-27 13:32:11 +053018#include <asm/hardware.h>
19#include <asm/io.h>
Keerthy3ab34bc2020-02-12 13:55:04 +053020#include <fs_loader.h>
21#include <fs.h>
22#include <env.h>
23#include <elf.h>
Lokesh Vutla6ce424a2019-03-08 11:47:33 +053024
25struct ti_sci_handle *get_ti_sci_handle(void)
26{
27 struct udevice *dev;
28 int ret;
29
Lokesh Vutlae69ffdb2019-09-27 13:32:15 +053030 ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
31 DM_GET_DRIVER(ti_sci), &dev);
Lokesh Vutla6ce424a2019-03-08 11:47:33 +053032 if (ret)
33 panic("Failed to get SYSFW (%d)\n", ret);
34
35 return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
36}
Lokesh Vutlaa3501a42018-11-02 19:51:05 +053037
Andreas Dannenberge630afe12019-08-15 15:55:28 -050038DECLARE_GLOBAL_DATA_PTR;
39
#ifdef CONFIG_K3_EARLY_CONS
/**
 * early_console_init() - Bring up a serial console before the regular
 * U-Boot console is available.
 *
 * Probes the serial device at index CONFIG_K3_EARLY_CONS_IDX and wires
 * it into the global data so that printf() output reaches the UART
 * this early in boot.
 *
 * Return: 0 on success, negative error code if the serial device
 * could not be obtained.
 */
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	/* Set the baudrate before probing; serial drivers read gd->baudrate */
	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	/* Mark the console ready so console output is routed to this UART */
	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif
63
Lokesh Vutlaa3501a42018-11-02 19:51:05 +053064#ifdef CONFIG_SYS_K3_SPL_ATF
Keerthy3ab34bc2020-02-12 13:55:04 +053065
/**
 * init_env() - Populate SPL environment variables that describe the
 * firmware storage location.
 *
 * Maps the current SPL boot device onto the generic storage_interface /
 * firmware-location environment variables consumed by the firmware
 * loader. Compiles to an empty function without CONFIG_SPL_ENV_SUPPORT.
 */
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	u32 boot_device;

	env_init();
	env_relocate();

	boot_device = spl_boot_device();
	if (boot_device == BOOT_DEVICE_MMC2) {
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", env_get("bootpart"));
	} else if (boot_device == BOOT_DEVICE_SPI) {
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
	} else {
		printf("%s from device %u not supported!\n",
		       __func__, boot_device);
	}
#endif
}
91
92#ifdef CONFIG_FS_LOADER
93int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
94{
95 struct udevice *fsdev;
96 char *name = NULL;
97 int size = 0;
98
99 *loadaddr = 0;
100#ifdef CONFIG_SPL_ENV_SUPPORT
101 switch (spl_boot_device()) {
102 case BOOT_DEVICE_MMC2:
103 name = env_get(name_fw);
104 *loadaddr = env_get_hex(name_loadaddr, *loadaddr);
105 break;
106 default:
107 printf("Loading rproc fw image from device %u not supported!\n",
108 spl_boot_device());
109 return 0;
110 }
111#endif
112 if (!*loadaddr)
113 return 0;
114
115 if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
116 size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
117 0, 0);
118 }
119
120 return size;
121}
122#else
123int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
124{
125 return 0;
126}
127#endif
128
/**
 * start_non_linux_remote_cores() - Board hook to boot auxiliary cores
 *
 * Weak default does nothing. Boards override this to start remote
 * cores that do not run Linux (e.g. auxiliary firmware) from SPL
 * before ATF is started.
 */
__weak void start_non_linux_remote_cores(void)
{
}
132
/**
 * jump_to_image_no_args() - Hand off from SPL to ATF on the ARM64 core
 * @spl_image: loaded image info; entry_point is where ATF will start
 *
 * Releases SPL-held exclusive devices, initializes remoteproc, loads
 * the ATF image onto remoteproc device 1 (assumed to be the Cortex-A
 * core) and starts it, then shuts this core's resources down and
 * parks it. Never returns.
 */
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	/* Set up env vars used by the firmware loader, then board cores */
	init_env();
	start_non_linux_remote_cores();

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	ret = rproc_load(1, spl_image->entry_point, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	debug("Releasing resources...\n");
	release_resources_for_core_shutdown();

	debug("Finalizing core shutdown...\n");
	/* Nothing left to do on this core after handing off to ATF */
	while (1)
		asm volatile("wfe");
}
170#endif
Lokesh Vutlaa9a84482019-03-08 11:47:34 +0530171
172#if defined(CONFIG_OF_LIBFDT)
173int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
174{
175 u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
176 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
177 int ret, node, subnode, len, prev_node;
178 u32 range[4], addr, size;
179 const fdt32_t *sub_reg;
180
181 ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
182 msmc_size = msmc_end - msmc_start + 1;
183 debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
184 msmc_start, msmc_size);
185
186 /* find or create "msmc_sram node */
187 ret = fdt_path_offset(blob, parent_path);
188 if (ret < 0)
189 return ret;
190
191 node = fdt_find_or_add_subnode(blob, ret, node_name);
192 if (node < 0)
193 return node;
194
195 ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
196 if (ret < 0)
197 return ret;
198
199 reg[0] = cpu_to_fdt64(msmc_start);
200 reg[1] = cpu_to_fdt64(msmc_size);
201 ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
202 if (ret < 0)
203 return ret;
204
205 fdt_setprop_cell(blob, node, "#address-cells", 1);
206 fdt_setprop_cell(blob, node, "#size-cells", 1);
207
208 range[0] = 0;
209 range[1] = cpu_to_fdt32(msmc_start >> 32);
210 range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
211 range[3] = cpu_to_fdt32(msmc_size);
212 ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
213 if (ret < 0)
214 return ret;
215
216 subnode = fdt_first_subnode(blob, node);
217 prev_node = 0;
218
219 /* Look for invalid subnodes and delete them */
220 while (subnode >= 0) {
221 sub_reg = fdt_getprop(blob, subnode, "reg", &len);
222 addr = fdt_read_number(sub_reg, 1);
223 sub_reg++;
224 size = fdt_read_number(sub_reg, 1);
225 debug("%s: subnode = %d, addr = 0x%x. size = 0x%x\n", __func__,
226 subnode, addr, size);
227 if (addr + size > msmc_size ||
228 !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
229 !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
230 fdt_del_node(blob, subnode);
231 debug("%s: deleting subnode %d\n", __func__, subnode);
232 if (!prev_node)
233 subnode = fdt_first_subnode(blob, node);
234 else
235 subnode = fdt_next_subnode(blob, prev_node);
236 } else {
237 prev_node = subnode;
238 subnode = fdt_next_subnode(blob, prev_node);
239 }
240 }
241
242 return 0;
243}
Andrew F. Davis29c9db42019-09-17 17:15:40 -0400244
/**
 * fdt_disable_node() - Set a device tree node's status to "disabled"
 * @blob: FDT blob to modify
 * @node_path: full path of the node to disable
 *
 * Return: 0 on success, negative libfdt error code if the node is
 * missing or the status property cannot be written.
 */
int fdt_disable_node(void *blob, char *node_path)
{
	int node;
	int err;

	node = fdt_path_offset(blob, node_path);
	if (node < 0) {
		printf("Node %s not found.\n", node_path);
		return node;
	}

	err = fdt_setprop_string(blob, node, "status", "disabled");
	if (err < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(err));
		return err;
	}

	return 0;
}
263
Lokesh Vutlaa9a84482019-03-08 11:47:34 +0530264#endif
Lokesh Vutlac2562d72019-06-13 10:29:42 +0530265
#ifndef CONFIG_SYSRESET
/*
 * reset_cpu() - Stub CPU reset handler.
 *
 * Without a sysreset driver there is no generic reset mechanism
 * available here, so this intentionally does nothing.
 */
void reset_cpu(ulong ignored)
{
}
#endif
Lokesh Vutlaf8ca9122019-09-27 13:32:11 +0530271
#if defined(CONFIG_DISPLAY_CPUINFO)
/**
 * print_cpuinfo() - Print the SoC family and silicon revision
 *
 * Decodes the JTAG device ID registers to identify the SoC family
 * (AM654, J721E, ...) and its silicon revision (SR 1.0, 2.0, ...),
 * and prints them on the console.
 *
 * Return: always 0.
 */
int print_cpuinfo(void)
{
	u32 soc, rev;
	char *name;

	soc = (readl(CTRLMMR_WKUP_JTAG_DEVICE_ID) &
		DEVICE_ID_FAMILY_MASK) >> DEVICE_ID_FAMILY_SHIFT;
	rev = (readl(CTRLMMR_WKUP_JTAG_ID) &
		JTAG_ID_VARIANT_MASK) >> JTAG_ID_VARIANT_SHIFT;

	printf("SoC: ");
	switch (soc) {
	case AM654:
		name = "AM654";
		break;
	case J721E:
		name = "J721E";
		break;
	default:
		name = "Unknown Silicon";
		break;
	}

	printf("%s SR ", name);
	switch (rev) {
	case REV_PG1_0:
		name = "1.0";
		break;
	case REV_PG2_0:
		name = "2.0";
		break;
	default:
		name = "Unknown Revision";
		break;
	}
	printf("%s\n", name);

	return 0;
}
#endif
Lokesh Vutlae938b222019-10-07 13:52:17 +0530311
#ifdef CONFIG_ARM64
/**
 * board_prep_linux() - Arch-specific preparation before booting Linux
 * @images: bootm image descriptors; os.start/os.end bound the kernel
 *
 * Flushes the loaded kernel image range from the data cache so a
 * coherent image is visible once caches are reconfigured.
 */
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	/* Round the end address up to a full cache line before flushing */
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif
Lokesh Vutla40109f42019-12-31 15:49:55 +0530322
#ifdef CONFIG_CPU_V7R
/**
 * disable_linefill_optimization() - Work around R5F deadlock conditions
 *
 * Sets the DLFO bit (bit 13) in the Cortex-R5F auxiliary control
 * register (ACTLR) to disable the linefill optimization; see the
 * comment below for the two deadlock scenarios this avoids.
 */
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where R5F can deadlock:
	 * 1.When software is performing series of store operations to
	 * cacheable write back/write allocate memory region and later
	 * on software execute barrier operation (DSB or DMB). R5F may
	 * hang at the barrier instruction.
	 * 2.When software is performing a mix of load and store operations
	 * within a tight loop and store operations are all writing to
	 * cacheable write back/write allocates memory regions, R5F may
	 * hang at one of the load instruction.
	 *
	 * To avoid the above two conditions disable linefill optimization
	 * inside Cortex R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit  */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif
Andrew F. Davisea70da12020-01-10 14:35:21 -0500347
348void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
349{
350 struct ti_sci_msg_fwl_region region;
351 struct ti_sci_fwl_ops *fwl_ops;
352 struct ti_sci_handle *ti_sci;
353 size_t i, j;
354
355 ti_sci = get_ti_sci_handle();
356 fwl_ops = &ti_sci->ops.fwl_ops;
357 for (i = 0; i < fwl_data_size; i++) {
358 for (j = 0; j < fwl_data[i].regions; j++) {
359 region.fwl_id = fwl_data[i].fwl_id;
360 region.region = j;
361 region.n_permission_regs = 3;
362
363 fwl_ops->get_fwl_region(ti_sci, &region);
364
365 if (region.control != 0) {
366 pr_debug("Attempting to disable firewall %5d (%25s)\n",
367 region.fwl_id, fwl_data[i].name);
368 region.control = 0;
369
370 if (fwl_ops->set_fwl_region(ti_sci, &region))
371 pr_err("Could not disable firewall %5d (%25s)\n",
372 region.fwl_id, fwl_data[i].name);
373 }
374 }
375 }
376}