// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <image.h>
#include <init.h>
#include <log.h>
#include <spl.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <asm/cache.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/arch/sys_proto.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <fs_loader.h>
#include <fs.h>
#include <env.h>
#include <elf.h>

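/*
 * get_ti_sci_handle() - Return the TI SCI handle of System Firmware
 *
 * Probes the ti_sci firmware device and returns the handle used for all
 * further TI SCI protocol calls. Panics if System Firmware is unavailable.
 */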
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_GET_DRIVER(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

void k3_sysfw_print_ver(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];

	/*
	 * Output System Firmware version info. Note that since the
	 * 'firmware_description' field is not guaranteed to be zero-
	 * terminated, we manually add a '\0' terminator if needed. Further
	 * note that we intentionally avoid the extended printf() formatter
	 * '%.*s' so as not to require a more full-featured printf()
	 * implementation.
	 */
	strncpy(fw_desc, ti_sci->version.firmware_description,
		sizeof(ti_sci->version.firmware_description));
	fw_desc[sizeof(fw_desc) - 1] = '\0';

	printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
	       ti_sci->version.abi_major, ti_sci->version.abi_minor,
	       ti_sci->version.firmware_revision, fw_desc);
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
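/*
 * early_console_init() - Bring up an early serial console in SPL
 *
 * Probes the serial device at sequence number CONFIG_K3_EARLY_CONS_IDX and
 * registers it as the current console device, so that output is possible
 * before the regular SPL console is set up.
 */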
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#ifdef CONFIG_SYS_K3_SPL_ATF

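/*
 * init_env() - Prepare the SPL environment for remote core firmware loading
 *
 * Initializes and relocates the environment (when CONFIG_SPL_ENV_SUPPORT is
 * enabled) and records which storage interface the firmware loader should
 * use, based on the device SPL itself was booted from.
 */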
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

#ifdef CONFIG_FS_LOADER
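/*
 * load_firmware() - Load a remote core firmware image via the fs_loader
 * @name_fw:       environment variable holding the firmware file name
 * @name_loadaddr: environment variable holding the load address
 * @loadaddr:      returns the address the firmware was loaded to
 *
 * Returns the firmware size on success, 0 if nothing was loaded, or a
 * negative error code from the firmware loader.
 */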
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}
#else
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	return 0;
}
#endif

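/*
 * start_non_linux_remote_cores() - Hook to start additional remote cores
 *
 * Weak default that does nothing. Boards can override it to load and start
 * remote cores that do not run Linux before ATF is started.
 */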
__weak void start_non_linux_remote_cores(void)
{
}

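/*
 * jump_to_image_no_args() - Hand over from R5 SPL to ATF on the ARM64 core
 *
 * Releases the exclusive devices held by SPL, loads ATF onto remoteproc
 * device 1 (the boot Cortex-A core) and starts it. If an MCU R5 firmware
 * image was loaded via the environment, execution jumps into that ELF image;
 * otherwise the R5 core releases its resources and idles.
 */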
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();
	start_non_linux_remote_cores();
	size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
			     &loadaddr);

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	ret = rproc_load(1, spl_image->entry_point, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);
	if (!(size > 0 && valid_elf_image(loadaddr))) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}

	image_entry_noargs_t image_entry =
		(image_entry_noargs_t)load_elf_image_phdr(loadaddr);

	image_entry();
}
#endif

#if defined(CONFIG_OF_LIBFDT)
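/*
 * fdt_fixup_msmc_ram() - Update the MSMC SRAM node with the usable region
 * @blob:        FDT blob to fix up
 * @parent_path: path of the parent of the MSMC SRAM node
 * @node_name:   name of the MSMC SRAM node
 *
 * Queries the available MSMC region from System Firmware and updates (or
 * creates) the corresponding mmio-sram node. Subnodes that fall outside the
 * available region, as well as the sysfw and l3cache areas, are deleted.
 */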
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
{
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* find or create "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	if (ret < 0)
		return ret;

	node = fdt_find_or_add_subnode(blob, ret, node_name);
	if (node < 0)
		return node;

	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
	if (ret < 0)
		return ret;

	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
	if (ret < 0)
		return ret;

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	range[0] = 0;
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
	if (ret < 0)
		return ret;

	subnode = fdt_first_subnode(blob, node);
	prev_node = 0;

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		sub_reg++;
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x, size = 0x%x\n", __func__,
		      subnode, addr, size);
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			if (!prev_node)
				subnode = fdt_first_subnode(blob, node);
			else
				subnode = fdt_next_subnode(blob, prev_node);
		} else {
			prev_node = subnode;
			subnode = fdt_next_subnode(blob, prev_node);
		}
	}

	return 0;
}

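/*
 * fdt_disable_node() - Set the status of a device tree node to "disabled"
 * @blob:      FDT blob to modify
 * @node_path: full path of the node to disable
 */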
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		printf("Node %s not found.\n", node_path);
		return offs;
	}
	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}
	return 0;
}

#endif

#ifndef CONFIG_SYSRESET
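/* Without a sysreset driver this is a no-op; reset requests are ignored */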
void reset_cpu(ulong ignored)
{
}
#endif

#if defined(CONFIG_DISPLAY_CPUINFO)
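/*
 * print_cpuinfo() - Print the SoC family and silicon revision
 *
 * Decodes the part number and variant fields of the WKUP JTAG ID register
 * to identify the SoC and its silicon revision.
 */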
int print_cpuinfo(void)
{
	u32 soc, rev;
	char *name;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
		JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;
	rev = (readl(CTRLMMR_WKUP_JTAG_ID) &
		JTAG_ID_VARIANT_MASK) >> JTAG_ID_VARIANT_SHIFT;

	printf("SoC: ");
	switch (soc) {
	case AM65X:
		name = "AM65x";
		break;
	case J721E:
		name = "J721E";
		break;
	default:
		name = "Unknown Silicon";
	}

	printf("%s SR ", name);
	switch (rev) {
	case REV_PG1_0:
		name = "1.0";
		break;
	case REV_PG2_0:
		name = "2.0";
		break;
	default:
		name = "Unknown Revision";
	}
	printf("%s\n", name);

	return 0;
}
#endif

#ifdef CONFIG_ARM64
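/*
 * board_prep_linux() - Final preparation before handing off to Linux
 *
 * Flushes the loaded kernel image out of the data cache, since the kernel
 * is entered with the MMU and caches disabled.
 */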
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write back/write allocate memory region and then
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write back/write allocate memory regions, the R5F
	 *    may hang at one of the load instructions.
	 *
	 * To avoid both conditions, disable the linefill optimization
	 * inside the Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif

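/*
 * remove_fwl_configs() - Disable the listed firewall regions
 * @fwl_data:      array describing the firewalls to process
 * @fwl_data_size: number of entries in @fwl_data
 *
 * Walks each region of each firewall in the list and, if the region is
 * currently enabled, clears its control word via the TI SCI firewall ops
 * to disable it.
 */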
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	struct ti_sci_msg_fwl_region region;
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	size_t i, j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;
	for (i = 0; i < fwl_data_size; i++) {
		for (j = 0; j < fwl_data[i].regions; j++) {
			region.fwl_id = fwl_data[i].fwl_id;
			region.region = j;
			region.n_permission_regs = 3;

			fwl_ops->get_fwl_region(ti_sci, &region);

			if (region.control != 0) {
				pr_debug("Attempting to disable firewall %5d (%25s)\n",
					 region.fwl_id, fwl_data[i].name);
				region.control = 0;

				if (fwl_ops->set_fwl_region(ti_sci, &region))
					pr_err("Could not disable firewall %5d (%25s)\n",
					       region.fwl_id, fwl_data[i].name);
			}
		}
	}
}

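/*
 * spl_enable_dcache() - Enable the data cache in SPL
 *
 * Sizes DRAM, carves out space for the MMU page table at the top of the
 * 32-bit addressable portion of DDR, and turns on the data cache.
 */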
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CONFIG_SYS_SDRAM_BASE;

	dram_init_banksize();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

	dcache_enable();
#endif
}

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
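/*
 * The data cache enabled above must be turned off again before SPL hands
 * over control, both when jumping to the next boot stage and when booting
 * Linux directly from SPL.
 */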
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}

void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
#endif