blob: 08b7344df7494f9f0853cee7a70dd8eda6ad4653 [file] [log] [blame]
Lokesh Vutlaa3501a42018-11-02 19:51:05 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * K3: Common Architecture initialization
4 *
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
6 * Lokesh Vutla <lokeshvutla@ti.com>
7 */
8
9#include <common.h>
Simon Glass9a3b4ce2019-12-28 10:45:01 -070010#include <cpu_func.h>
Simon Glass4d72caa2020-05-10 11:40:01 -060011#include <image.h>
Simon Glass691d7192020-05-10 11:40:02 -060012#include <init.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060013#include <log.h>
Lokesh Vutlaa3501a42018-11-02 19:51:05 +053014#include <spl.h>
Simon Glass401d1c42020-10-30 21:38:53 -060015#include <asm/global_data.h>
Lokesh Vutlaa3501a42018-11-02 19:51:05 +053016#include "common.h"
17#include <dm.h>
18#include <remoteproc.h>
Simon Glass90526e92020-05-10 11:39:56 -060019#include <asm/cache.h>
Lokesh Vutla6ce424a2019-03-08 11:47:33 +053020#include <linux/soc/ti/ti_sci_protocol.h>
Lokesh Vutlaa9a84482019-03-08 11:47:34 +053021#include <fdt_support.h>
Andreas Dannenbergf9380a72019-06-07 19:24:42 +053022#include <asm/arch/sys_proto.h>
Lokesh Vutlaf8ca9122019-09-27 13:32:11 +053023#include <asm/hardware.h>
24#include <asm/io.h>
Keerthy3ab34bc2020-02-12 13:55:04 +053025#include <fs_loader.h>
26#include <fs.h>
27#include <env.h>
28#include <elf.h>
Dave Gerlach5ab71ea2020-07-15 23:40:04 -050029#include <soc.h>
Lokesh Vutla6ce424a2019-03-08 11:47:33 +053030
Tero Kristo547b2772021-06-11 11:45:19 +030031#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
/*
 * Logical IDs for the images SPL hands over to at the end of boot; used
 * to index fit_image_info[].  IMAGE_AMT is the total count.
 */
enum {
	IMAGE_ID_ATF,
	IMAGE_ID_OPTEE,
	IMAGE_ID_SPL,
	IMAGE_ID_DM_FW,
	IMAGE_AMT,
};

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
/*
 * FIT "os" property values identifying each image above; order must
 * match the enum so the match index doubles as the image ID.
 */
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
};
#endif

/* Entry address/size of each image, filled in as FIT nodes are processed */
static struct image_info fit_image_info[IMAGE_AMT];
50#endif
51
/**
 * get_ti_sci_handle() - Get a handle to TI System Firmware (TI-SCI)
 *
 * Probes the ti_sci firmware device through the driver model and returns
 * the TI-SCI protocol handle it exposes.  Panics on failure, since on K3
 * nothing meaningful can be done without System Firmware.
 *
 * Return: pointer to the TI-SCI handle (never NULL; panics on failure)
 */
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_DRIVER_GET(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}
Lokesh Vutlaa3501a42018-11-02 19:51:05 +053064
Lokesh Vutla6e44aeb2020-03-10 16:50:58 +053065void k3_sysfw_print_ver(void)
66{
67 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
68 char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];
69
70 /*
71 * Output System Firmware version info. Note that since the
72 * 'firmware_description' field is not guaranteed to be zero-
73 * terminated we manually add a \0 terminator if needed. Further
74 * note that we intentionally no longer rely on the extended
75 * printf() formatter '%.*s' to not having to require a more
76 * full-featured printf() implementation.
77 */
78 strncpy(fw_desc, ti_sci->version.firmware_description,
79 sizeof(ti_sci->version.firmware_description));
80 fw_desc[sizeof(fw_desc) - 1] = '\0';
81
82 printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
83 ti_sci->version.abi_major, ti_sci->version.abi_minor,
84 ti_sci->version.firmware_revision, fw_desc);
85}
86
Lokesh Vutla58ccd612020-08-05 22:44:17 +053087void mmr_unlock(phys_addr_t base, u32 partition)
88{
89 /* Translate the base address */
90 phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;
91
92 /* Unlock the requested partition if locked using two-step sequence */
93 writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
94 writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
95}
96
Lokesh Vutla01dbe362020-08-05 22:44:23 +053097bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
98{
99 if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
100 return false;
101
102 return data->num_components > 1;
103}
104
Andreas Dannenberge630afe12019-08-15 15:55:28 -0500105DECLARE_GLOBAL_DATA_PTR;
106
107#ifdef CONFIG_K3_EARLY_CONS
108int early_console_init(void)
109{
110 struct udevice *dev;
111 int ret;
112
113 gd->baudrate = CONFIG_BAUDRATE;
114
115 ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
116 &dev);
117 if (ret) {
118 printf("Error getting serial dev for early console! (%d)\n",
119 ret);
120 return ret;
121 }
122
123 gd->cur_serial_dev = dev;
124 gd->flags |= GD_FLG_SERIAL_READY;
125 gd->have_console = 1;
126
127 return 0;
128}
129#endif
130
Tero Kristo547b2772021-06-11 11:45:19 +0300131#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
Keerthy3ab34bc2020-02-12 13:55:04 +0530132
/**
 * init_env() - Set up environment variables describing the boot media
 *
 * With SPL environment support enabled, loads the environment and then
 * records where remote-core firmware should later be fetched from
 * (storage interface plus device/partition or UBI volume), based on the
 * device SPL booted from.  Unsupported boot devices only get a warning.
 */
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	u32 boot_device;
	char *part;

	env_init();
	env_relocate();

	boot_device = spl_boot_device();
	switch (boot_device) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, boot_device);
		return;
	}
#endif
}
158
159#ifdef CONFIG_FS_LOADER
160int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
161{
162 struct udevice *fsdev;
163 char *name = NULL;
164 int size = 0;
165
166 *loadaddr = 0;
167#ifdef CONFIG_SPL_ENV_SUPPORT
168 switch (spl_boot_device()) {
169 case BOOT_DEVICE_MMC2:
170 name = env_get(name_fw);
171 *loadaddr = env_get_hex(name_loadaddr, *loadaddr);
172 break;
173 default:
174 printf("Loading rproc fw image from device %u not supported!\n",
175 spl_boot_device());
176 return 0;
177 }
178#endif
179 if (!*loadaddr)
180 return 0;
181
182 if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
183 size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
184 0, 0);
185 }
186
187 return size;
188}
189#else
/* Stub used when CONFIG_FS_LOADER is disabled: report nothing was loaded. */
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	return 0;
}
194#endif
195
/*
 * Weak default: SoC-specific code may override this to release resources
 * (DM services, power domains, etc.) before the R5 core shuts itself down.
 */
__weak void release_resources_for_core_shutdown(void)
{
	debug("%s not implemented...\n", __func__);
}
200
/**
 * jump_to_image_no_args() - Hand off from R5 SPL to ATF and the DM firmware
 * @spl_image: SPL image description; entry_point is used as the ATF start
 *             address when no ATF image was recorded from the FIT
 *
 * Starts ATF on remoteproc device 1 (assumed to be the Cortex-A core; the
 * DT must agree), then either shuts this core down or jumps into the
 * device-management (DM) firmware ELF image.  Never returns.
 */
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	/* No DM firmware in the FIT: try to fetch it from storage instead */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	/* No DM firmware anywhere: nothing left to run on this core */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		/* Park the core forever in low-power wait-for-event */
		while (1)
			asm volatile("wfe");
	}

	/* Resolve the DM firmware entry point (ELF loaded in-place) */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
263#endif
Lokesh Vutlaa9a84482019-03-08 11:47:34 +0530264
Tero Kristo547b2772021-06-11 11:45:19 +0300265#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
266void board_fit_image_post_process(const void *fit, int node, void **p_image,
267 size_t *p_size)
268{
269#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
270 int len;
271 int i;
272 const char *os;
273 u32 addr;
274
275 os = fdt_getprop(fit, node, "os", &len);
276 addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);
277
278 debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
279 addr, *p_size, os);
280
281 for (i = 0; i < IMAGE_AMT; i++) {
282 if (!strcmp(os, image_os_match[i])) {
283 fit_image_info[i].image_start = addr;
284 fit_image_info[i].image_len = *p_size;
285 debug("%s: matched image for ID %d\n", __func__, i);
286 break;
287 }
288 }
289#endif
290
291#if IS_ENABLED(CONFIG_TI_SECURE_DEVICE)
292 ti_secure_image_post_process(p_image, p_size);
293#endif
294}
295#endif
296
Lokesh Vutlaa9a84482019-03-08 11:47:34 +0530297#if defined(CONFIG_OF_LIBFDT)
298int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
299{
300 u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
301 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
302 int ret, node, subnode, len, prev_node;
303 u32 range[4], addr, size;
304 const fdt32_t *sub_reg;
305
306 ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
307 msmc_size = msmc_end - msmc_start + 1;
308 debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
309 msmc_start, msmc_size);
310
311 /* find or create "msmc_sram node */
312 ret = fdt_path_offset(blob, parent_path);
313 if (ret < 0)
314 return ret;
315
316 node = fdt_find_or_add_subnode(blob, ret, node_name);
317 if (node < 0)
318 return node;
319
320 ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
321 if (ret < 0)
322 return ret;
323
324 reg[0] = cpu_to_fdt64(msmc_start);
325 reg[1] = cpu_to_fdt64(msmc_size);
326 ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
327 if (ret < 0)
328 return ret;
329
330 fdt_setprop_cell(blob, node, "#address-cells", 1);
331 fdt_setprop_cell(blob, node, "#size-cells", 1);
332
333 range[0] = 0;
334 range[1] = cpu_to_fdt32(msmc_start >> 32);
335 range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
336 range[3] = cpu_to_fdt32(msmc_size);
337 ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
338 if (ret < 0)
339 return ret;
340
341 subnode = fdt_first_subnode(blob, node);
342 prev_node = 0;
343
344 /* Look for invalid subnodes and delete them */
345 while (subnode >= 0) {
346 sub_reg = fdt_getprop(blob, subnode, "reg", &len);
347 addr = fdt_read_number(sub_reg, 1);
348 sub_reg++;
349 size = fdt_read_number(sub_reg, 1);
350 debug("%s: subnode = %d, addr = 0x%x. size = 0x%x\n", __func__,
351 subnode, addr, size);
352 if (addr + size > msmc_size ||
353 !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
354 !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
355 fdt_del_node(blob, subnode);
356 debug("%s: deleting subnode %d\n", __func__, subnode);
357 if (!prev_node)
358 subnode = fdt_first_subnode(blob, node);
359 else
360 subnode = fdt_next_subnode(blob, prev_node);
361 } else {
362 prev_node = subnode;
363 subnode = fdt_next_subnode(blob, prev_node);
364 }
365 }
366
367 return 0;
368}
Andrew F. Davis29c9db42019-09-17 17:15:40 -0400369
/**
 * fdt_disable_node() - Set a device tree node's status to "disabled"
 * @blob:      FDT blob to modify
 * @node_path: full path of the node to disable
 *
 * Return: 0 on success, negative libfdt error code otherwise.
 */
int fdt_disable_node(void *blob, char *node_path)
{
	int node;
	int err;

	node = fdt_path_offset(blob, node_path);
	if (node < 0) {
		printf("Node %s not found.\n", node_path);
		return node;
	}

	err = fdt_setprop_string(blob, node, "status", "disabled");
	if (err < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(err));
		return err;
	}

	return 0;
}
388
Lokesh Vutlaa9a84482019-03-08 11:47:34 +0530389#endif
Lokesh Vutlac2562d72019-06-13 10:29:42 +0530390
391#ifndef CONFIG_SYSRESET
/* Empty stub used when CONFIG_SYSRESET is disabled; reset is a no-op. */
void reset_cpu(void)
{
}
395#endif
Lokesh Vutlaf8ca9122019-09-27 13:32:11 +0530396
397#if defined(CONFIG_DISPLAY_CPUINFO)
/**
 * print_cpuinfo() - Print the SoC family and revision on the console
 *
 * Uses the SoC uclass; prints "UNKNOWN" when no SoC device is available.
 *
 * Return: always 0 (failures are reported on the console only).
 */
int print_cpuinfo(void)
{
	struct udevice *soc;
	char name[64];
	int ret;

	printf("SoC: ");

	ret = soc_get(&soc);
	if (ret) {
		printf("UNKNOWN\n");
		return 0;
	}

	/* Use sizeof(name) rather than repeating the magic buffer size */
	ret = soc_get_family(soc, name, sizeof(name));
	if (!ret)
		printf("%s ", name);

	ret = soc_get_revision(soc, name, sizeof(name));
	if (!ret)
		printf("%s\n", name);

	return 0;
}
424#endif
Lokesh Vutlae938b222019-10-07 13:52:17 +0530425
Lokesh Vutla2a18be72020-08-05 22:44:19 +0530426bool soc_is_j721e(void)
427{
428 u32 soc;
429
430 soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
431 JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;
432
433 return soc == J721E;
434}
435
Lokesh Vutla30de1ba2020-08-05 22:44:21 +0530436bool soc_is_j7200(void)
437{
438 u32 soc;
439
440 soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
441 JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;
442
443 return soc == J7200;
444}
445
Lokesh Vutlae938b222019-10-07 13:52:17 +0530446#ifdef CONFIG_ARM64
/*
 * Flush the loaded kernel image out of the data cache before handing
 * control to Linux, so the image is visible in memory when the kernel
 * entry runs with caches/MMU off.
 */
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
455#endif
Lokesh Vutla40109f42019-12-31 15:49:55 +0530456
457#ifdef CONFIG_CPU_V7R
/**
 * disable_linefill_optimization() - Work around R5F linefill deadlocks
 *
 * Sets the DLFO bit in the Cortex-R5 ACTLR (auxiliary control register)
 * via cp15 to disable cache linefill optimization.  See the comment body
 * below for the two deadlock scenarios this avoids.
 */
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where R5F can deadlock:
	 * 1.When software is performing series of store operations to
	 *   cacheable write back/write allocate memory region and later
	 *   on software execute barrier operation (DSB or DMB). R5F may
	 *   hang at the barrier instruction.
	 * 2.When software is performing a mix of load and store operations
	 *   within a tight loop and store operations are all writing to
	 *   cacheable write back/write allocates memory regions, R5F may
	 *   hang at one of the load instruction.
	 *
	 * To avoid the above two conditions disable linefill optimization
	 * inside Cortex R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit  */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
480#endif
Andrew F. Davisea70da12020-01-10 14:35:21 -0500481
482void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
483{
484 struct ti_sci_msg_fwl_region region;
485 struct ti_sci_fwl_ops *fwl_ops;
486 struct ti_sci_handle *ti_sci;
487 size_t i, j;
488
489 ti_sci = get_ti_sci_handle();
490 fwl_ops = &ti_sci->ops.fwl_ops;
491 for (i = 0; i < fwl_data_size; i++) {
492 for (j = 0; j < fwl_data[i].regions; j++) {
493 region.fwl_id = fwl_data[i].fwl_id;
494 region.region = j;
495 region.n_permission_regs = 3;
496
497 fwl_ops->get_fwl_region(ti_sci, &region);
498
499 if (region.control != 0) {
500 pr_debug("Attempting to disable firewall %5d (%25s)\n",
501 region.fwl_id, fwl_data[i].name);
502 region.control = 0;
503
504 if (fwl_ops->set_fwl_region(ti_sci, &region))
505 pr_err("Could not disable firewall %5d (%25s)\n",
506 region.fwl_id, fwl_data[i].name);
507 }
508 }
509 }
510}
Jan Kiszkac02712a2020-05-18 07:57:22 +0200511
/*
 * Enable the data cache in SPL: size DRAM banks, reserve space for the
 * MMU page table at the top of (32-bit addressable) RAM, then turn the
 * cache on.  Compiled out when both caches are configured off.
 */
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CONFIG_SYS_SDRAM_BASE;

	dram_init_banksize();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	/* Page table lives in the last tlb_size bytes below ram_top */
	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

	dcache_enable();
#endif
}
534
535#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
/* Turn the data cache back off before SPL jumps to the next stage. */
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}
540
/* Turn the data cache back off before SPL boots directly into Linux. */
void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
545#endif