// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <spl.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/arch/sys_proto.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <fs_loader.h>
#include <fs.h>
#include <env.h>
#include <elf.h>

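/*
 * Look up the TI SCI (System Firmware) device via driver model and return
 * its handle; panic if System Firmware is not available, since nothing
 * useful can be done without it.
 */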
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_GET_DRIVER(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
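/*
 * Probe the serial device at index CONFIG_K3_EARLY_CONS_IDX and register it
 * as the current console so that output is available early in SPL.
 */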
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#ifdef CONFIG_SYS_K3_SPL_ATF

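/*
 * Read the environment and record, based on the SPL boot device, which
 * storage interface and location firmware images should be loaded from.
 */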
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

#ifdef CONFIG_FS_LOADER
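/*
 * Load the remote-core firmware image named by the environment variable
 * @name_fw to the address given by the environment variable @name_loadaddr,
 * using the filesystem firmware loader. Returns the size reported by the
 * loader, or 0 if nothing was loaded.
 */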
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}
#else
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	return 0;
}
#endif

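/* Weak stub; boards can override this to start any non-Linux remote cores */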
__weak void start_non_linux_remote_cores(void)
{
}

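/*
 * SPL hand-off: release the devices held exclusively by SPL, initialize the
 * remoteproc framework, optionally load an MCU R5F firmware image named in
 * the environment, start ATF on the ARM64 core (remoteproc device 1), and
 * then either jump into the loaded R5F image or shut this core down.
 */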
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();
	start_non_linux_remote_cores();
	size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
			     &loadaddr);

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	ret = rproc_load(1, spl_image->entry_point, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (!(size > 0 && valid_elf_image(loadaddr))) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}

	image_entry_noargs_t image_entry =
		(image_entry_noargs_t)load_elf_image_phdr(loadaddr);

	image_entry();
}
#endif

#if defined(CONFIG_OF_LIBFDT)
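/*
 * Describe the MSMC SRAM region reported by System Firmware as an
 * "mmio-sram" node under @parent_path, and delete subnodes that fall
 * outside that region or that are named sysfw/l3cache.
 */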
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
{
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* find or create "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	if (ret < 0)
		return ret;

	node = fdt_find_or_add_subnode(blob, ret, node_name);
	if (node < 0)
		return node;

	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
	if (ret < 0)
		return ret;

	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
	if (ret < 0)
		return ret;

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	range[0] = 0;
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
	if (ret < 0)
		return ret;

	subnode = fdt_first_subnode(blob, node);
	prev_node = 0;

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		sub_reg++;
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x. size = 0x%x\n", __func__,
		      subnode, addr, size);
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			if (!prev_node)
				subnode = fdt_first_subnode(blob, node);
			else
				subnode = fdt_next_subnode(blob, prev_node);
		} else {
			prev_node = subnode;
			subnode = fdt_next_subnode(blob, prev_node);
		}
	}

	return 0;
}

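/* Mark the node at @node_path as disabled by setting its status property */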
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		printf("Node %s not found.\n", node_path);
		return offs;
	}
	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}
	return 0;
}

#endif

#ifndef CONFIG_SYSRESET
void reset_cpu(ulong ignored)
{
}
#endif

#if defined(CONFIG_DISPLAY_CPUINFO)
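/*
 * Print the SoC family and silicon revision decoded from the WKUP domain
 * JTAG ID registers.
 */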
int print_cpuinfo(void)
{
	u32 soc, rev;
	char *name;

	soc = (readl(CTRLMMR_WKUP_JTAG_DEVICE_ID) &
		DEVICE_ID_FAMILY_MASK) >> DEVICE_ID_FAMILY_SHIFT;
	rev = (readl(CTRLMMR_WKUP_JTAG_ID) &
		JTAG_ID_VARIANT_MASK) >> JTAG_ID_VARIANT_SHIFT;

	printf("SoC: ");
	switch (soc) {
	case AM654:
		name = "AM654";
		break;
	case J721E:
		name = "J721E";
		break;
	default:
		name = "Unknown Silicon";
	}

	printf("%s SR ", name);
	switch (rev) {
	case REV_PG1_0:
		name = "1.0";
		break;
	case REV_PG2_0:
		name = "2.0";
		break;
	default:
		name = "Unknown Revision";
	}
	printf("%s\n", name);

	return 0;
}
#endif

#ifdef CONFIG_ARM64
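/*
 * Flush the loaded kernel image range from the data cache before control is
 * handed over to Linux.
 */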
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are two conditions where the R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write-back/write-allocate memory region and then
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write-back/write-allocate memory regions, the R5F
	 *    may hang at one of the load instructions.
	 *
	 * To avoid both conditions, disable the linefill optimization
	 * inside the Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif

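/*
 * Walk the firewall regions listed in @fwl_data and, for every region whose
 * control word is non-zero, ask System Firmware to clear it so that the
 * firewall is disabled.
 */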
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	struct ti_sci_msg_fwl_region region;
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	size_t i, j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;
	for (i = 0; i < fwl_data_size; i++) {
		for (j = 0; j < fwl_data[i].regions; j++) {
			region.fwl_id = fwl_data[i].fwl_id;
			region.region = j;
			region.n_permission_regs = 3;

			fwl_ops->get_fwl_region(ti_sci, &region);

			if (region.control != 0) {
				pr_debug("Attempting to disable firewall %5d (%25s)\n",
					 region.fwl_id, fwl_data[i].name);
				region.control = 0;

				if (fwl_ops->set_fwl_region(ti_sci, &region))
					pr_err("Could not disable firewall %5d (%25s)\n",
					       region.fwl_id, fwl_data[i].name);
			}
		}
	}
}