// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <image.h>
#include <init.h>
#include <log.h>
#include <spl.h>
#include <asm/global_data.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <asm/cache.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/arch/sys_proto.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <fs_loader.h>
#include <fs.h>
#include <env.h>
#include <elf.h>
#include <soc.h>

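/*
 * Look up the ti_sci firmware device and return its TI-SCI handle.
 * Panics if System Firmware (SYSFW) is not available.
 */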
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_DRIVER_GET(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

void k3_sysfw_print_ver(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];

	/*
	 * Output System Firmware version info. Note that since the
	 * 'firmware_description' field is not guaranteed to be zero-
	 * terminated we manually add a '\0' terminator if needed. Further
	 * note that we intentionally avoid the extended printf() format
	 * specifier '%.*s' so that a more full-featured printf()
	 * implementation is not required.
	 */
	strncpy(fw_desc, ti_sci->version.firmware_description,
		sizeof(ti_sci->version.firmware_description));
	fw_desc[sizeof(fw_desc) - 1] = '\0';

	printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
	       ti_sci->version.abi_major, ti_sci->version.abi_minor,
	       ti_sci->version.firmware_revision, fw_desc);
}

void mmr_unlock(phys_addr_t base, u32 partition)
{
	/* Translate the base address */
	phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;

	/* Unlock the requested partition, if locked, using the two-step kick sequence */
	writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
	writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
}

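/*
 * Return true if the ROM extended boot data carries a valid header and
 * describes more than one image component, i.e. the boot ROM has already
 * loaded System Firmware.
 */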
bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
{
	if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
		return false;

	return data->num_components > 1;
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
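/*
 * Bring up the serial device selected by CONFIG_K3_EARLY_CONS_IDX and
 * register it as the console so output is available before the regular
 * console is set up.
 */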
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#ifdef CONFIG_SYS_K3_SPL_ATF

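/*
 * Set up the SPL environment and derive the firmware storage location
 * ("storage_interface" and related variables) from the boot device.
 */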
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

#ifdef CONFIG_FS_LOADER
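/*
 * Load a remote core firmware image via the filesystem loader. The file
 * name and load address are taken from the environment variables named by
 * 'name_fw' and 'name_loadaddr'. Returns the number of bytes loaded, or 0
 * if nothing was loaded.
 */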
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}
#else
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	return 0;
}
#endif

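/*
 * Weak hook for boards to start any remote cores that do not run Linux
 * before ATF is started; the default implementation does nothing.
 */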
__weak void start_non_linux_remote_cores(void)
{
}

void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();
	start_non_linux_remote_cores();
	size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
			     &loadaddr);

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	ret = rproc_load(1, spl_image->entry_point, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (!(size > 0 && valid_elf_image(loadaddr))) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}

	image_entry_noargs_t image_entry =
		(image_entry_noargs_t)load_elf_image_phdr(loadaddr);

	image_entry();
}
#endif

#if defined(CONFIG_OF_LIBFDT)
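/*
 * Create (or update) an "mmio-sram" node for the MSMC SRAM under
 * 'parent_path' in the given device tree blob. The SRAM base and size are
 * queried from System Firmware, and any subnodes that fall outside the
 * available range or that describe the SYSFW or L3 cache regions are
 * removed.
 */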
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
{
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* Find or create the "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	if (ret < 0)
		return ret;

	node = fdt_find_or_add_subnode(blob, ret, node_name);
	if (node < 0)
		return node;

	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
	if (ret < 0)
		return ret;

	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
	if (ret < 0)
		return ret;

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	range[0] = 0;
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
	if (ret < 0)
		return ret;

	subnode = fdt_first_subnode(blob, node);
	prev_node = 0;

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		sub_reg++;
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x, size = 0x%x\n", __func__,
		      subnode, addr, size);
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			if (!prev_node)
				subnode = fdt_first_subnode(blob, node);
			else
				subnode = fdt_next_subnode(blob, prev_node);
		} else {
			prev_node = subnode;
			subnode = fdt_next_subnode(blob, prev_node);
		}
	}

	return 0;
}

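/* Mark the device tree node at 'node_path' as disabled (status = "disabled") */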
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		printf("Node %s not found.\n", node_path);
		return offs;
	}
	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}
	return 0;
}

#endif

#ifndef CONFIG_SYSRESET
void reset_cpu(ulong ignored)
{
}
#endif

#if defined(CONFIG_DISPLAY_CPUINFO)
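/* Print the SoC family and revision as reported by the SOC uclass driver */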
int print_cpuinfo(void)
{
	struct udevice *soc;
	char name[64];
	int ret;

	printf("SoC: ");

	ret = soc_get(&soc);
	if (ret) {
		printf("UNKNOWN\n");
		return 0;
	}

	ret = soc_get_family(soc, name, 64);
	if (!ret)
		printf("%s ", name);

	ret = soc_get_revision(soc, name, 64);
	if (!ret)
		printf("%s\n", name);

	return 0;
}
#endif

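/* Identify the SoC by the part number field of the WKUP JTAG ID register */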
bool soc_is_j721e(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J721E;
}

bool soc_is_j7200(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J7200;
}

#ifdef CONFIG_ARM64
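/* Flush the kernel image range from the data cache before handing over to Linux */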
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where the R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write back/write allocate memory region and later
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write back/write allocate memory regions, the R5F
	 *    may hang at one of the load instructions.
	 *
	 * To avoid the above two conditions, disable linefill optimization
	 * inside the Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif

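/*
 * Walk the given firewall list and, for every region that is still enabled,
 * ask System Firmware to clear its control settings so the firewall no
 * longer restricts access.
 */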
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	struct ti_sci_msg_fwl_region region;
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	size_t i, j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;
	for (i = 0; i < fwl_data_size; i++) {
		for (j = 0; j < fwl_data[i].regions; j++) {
			region.fwl_id = fwl_data[i].fwl_id;
			region.region = j;
			region.n_permission_regs = 3;

			fwl_ops->get_fwl_region(ti_sci, &region);

			if (region.control != 0) {
				pr_debug("Attempting to disable firewall %5d (%25s)\n",
					 region.fwl_id, fwl_data[i].name);
				region.control = 0;

				if (fwl_ops->set_fwl_region(ti_sci, &region))
					pr_err("Could not disable firewall %5d (%25s)\n",
					       region.fwl_id, fwl_data[i].name);
			}
		}
	}
}

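/*
 * Enable the data cache in SPL. The page table is placed just below the top
 * of usable DRAM, capped at the 4 GiB boundary so it stays within the
 * 32-bit address space.
 */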
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CONFIG_SYS_SDRAM_BASE;

	dram_init_banksize();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

	dcache_enable();
#endif
}

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}

void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
#endif