// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE	0
#define LMB_ALIST_INITIAL_SIZE	4

static struct lmb lmb;

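/**
 * lmb_dump_region() - Print out the regions on a given LMB list
 * @lmb_rgn_lst: list of regions to dump (free or used memory)
 * @name: label to print for the list ("memory" or "reserved")
 *
 * Print the count of regions on the list, followed by the base address,
 * end address, size and flags of each region.
 */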
static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

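/**
 * lmb_addrs_overlap() - Check if two address ranges overlap
 * @base1: base address of the first range
 * @size1: size of the first range
 * @base2: base address of the second range
 * @size2: size of the second range
 *
 * Return: 1 if the two ranges overlap (end addresses inclusive), else 0
 */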
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

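/**
 * lmb_addrs_adjacent() - Check if two address ranges abut each other
 * @base1: base address of the first range
 * @size1: size of the first range
 * @base2: base address of the second range
 * @size2: size of the second range
 *
 * Return: 1 if the second range starts right after the first one ends,
 * -1 if the first range starts right after the second one ends, else 0
 */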
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

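/**
 * lmb_remove_region() - Remove a region from an LMB list
 * @lmb_rgn_lst: list to remove the region from
 * @r: index of the region to remove
 *
 * Shift all subsequent regions down by one slot and decrement the
 * region count.
 */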
static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("This case should never happen\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

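/**
 * arch_lmb_reserve_generic() - Reserve U-Boot's own memory regions
 * @sp: current stack pointer
 * @end: end address of the U-Boot area to protect
 * @align: alignment (in bytes) applied below the stack pointer
 *
 * Generic helper for arch_lmb_reserve() implementations: reserve the
 * region from just below the current stack up to the end of the U-Boot
 * area, so that loaded images cannot overwrite it. If relocation was
 * skipped, the U-Boot image itself is reserved as well.
 */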
void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp by 'align' bytes (typically 4K) to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve_flags(sp, bank_end - sp + 1, LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve_flags((phys_addr_t)(uintptr_t)_start,
					  gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}

/**
 * efi_lmb_reserve() - add reservations for EFI memory
 *
 * Add reservations for all EFI memory areas that are not
 * EFI_CONVENTIONAL_MEMORY.
 *
 * Return: 0 on success, 1 on failure
 */
static __maybe_unused int efi_lmb_reserve(void)
{
	struct efi_mem_desc *memmap = NULL, *map;
	efi_uintn_t i, map_size = 0;
	efi_status_t ret;

	ret = efi_get_memory_map_alloc(&map_size, &memmap);
	if (ret != EFI_SUCCESS)
		return 1;

	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
		if (map->type != EFI_CONVENTIONAL_MEMORY) {
			lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
							map->physical_start),
					  map->num_pages * EFI_PAGE_SIZE,
					  map->type == EFI_RESERVED_MEMORY_TYPE
					  ? LMB_NOMAP : LMB_NONE);
		}
	}
	efi_free_pool(memmap);

	return 0;
}

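/**
 * lmb_reserve_common() - Reserve memory that must not be allocated
 * @fdt_blob: pointer to the device tree blob, or NULL
 *
 * Call the arch- and board-specific reservation hooks, then reserve
 * the FDT memory reservation map entries (if OF_LIBFDT is enabled and
 * a blob was supplied) and the non-conventional EFI memory regions
 * (if EFI_LOADER is enabled).
 */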
static void lmb_reserve_common(void *fdt_blob)
{
	arch_lmb_reserve();
	board_lmb_reserve();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);

	if (CONFIG_IS_ENABLED(EFI_LOADER))
		efi_lmb_reserve();
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct bd_info *bd, void *fdt_blob)
{
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size)
			lmb_add(bd->bi_dram[i].start, bd->bi_dram[i].size);
	}

	lmb_reserve_common(fdt_blob);
}

/* Initialize the struct, add a memory range and call arch/board reserve functions */
void lmb_init_and_reserve_range(phys_addr_t base, phys_size_t size,
				void *fdt_blob)
{
	lmb_add(base, size);
	lmb_reserve_common(fdt_blob);
}

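/**
 * lmb_reserve_common_spl() - Reserve memory used by the SPL stage
 *
 * Reserve the memory occupied by the SPL stack and, when a separate
 * BSS region is in use, the SPL BSS, so that images loaded by SPL
 * cannot overwrite them.
 */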
static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			(phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

/**
 * lmb_add_memory() - Add memory range for LMB allocations
 *
 * Add the entire available memory range to the pool of memory that
 * can be used by the LMB module for allocations.
 *
 * Return: None
 */
void lmb_add_memory(void)
{
	int i;
	phys_size_t size;
	phys_addr_t rgn_top;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		if (size) {
			if (bd->bi_dram[i].start > ram_top)
				continue;

			rgn_top = bd->bi_dram[i].start +
				bd->bi_dram[i].size;

			if (rgn_top > ram_top)
				size -= rgn_top - ram_top;

			lmb_add(bd->bi_dram[i].start, size);
		}
	}
}

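/**
 * lmb_resize_regions() - Merge overlapping regions into one
 * @lmb_rgn_lst: list the regions belong to
 * @idx_start: index of the first region that overlaps the new range
 * @base: base address of the range being added
 * @size: size of the range being added
 *
 * Grow the region at @idx_start so that it covers every region on the
 * list that overlaps [@base, @base + @size), as well as the range
 * itself, then remove the regions that were merged in.
 *
 * Return: 0 on success, -1 if any overlapping region has flags other
 * than LMB_NONE (such regions must not be resized)
 */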
static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not already
 * exist, it is added to the list. Depending on the attributes of the
 * region to be added, the function might resize an already existing
 * region or coalesce two adjacent regions.
 *
 * Returns: 0 if the region addition was successful, -1 on failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* region with different flags */
		}

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags == LMB_NONE) {
				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
							 size);
				if (ret < 0)
					return -1;

				coalesced++;
				break;
			} else {
				return -1;
			}
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return coalesced;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}

static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	struct alist *lmb_rgn_lst = &lmb.free_mem;

	return lmb_add_region(lmb_rgn_lst, base, size);
}

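/**
 * lmb_free() - Free up a region of memory
 * @base: start address of the region to free
 * @size: size of the region to free
 *
 * The freed range must lie entirely within a single reserved region.
 * Depending on where the range sits, the reserved region is removed,
 * shrunk from the front or back, or split in two.
 *
 * Return: 0 on success, -1 if no reserved region contains the range
 */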
long lmb_free(phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn;
	struct alist *lmb_rgn_lst = &lmb.used_mem;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region that (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}

long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
}

long lmb_reserve(phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(base, size, LMB_NONE);
}

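/**
 * lmb_overlaps_region() - Find a region overlapping an address range
 * @lmb_rgn_lst: list of regions to search
 * @base: base address of the range
 * @size: size of the range
 *
 * Return: index of the first region on the list that overlaps the
 * range, or -1 if there is no overlap
 */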
static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

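/**
 * __lmb_alloc_base() - Allocate a block of memory below a maximum address
 * @size: size of the block to allocate
 * @align: alignment of the block's base address
 * @max_addr: highest address the block may extend to, or LMB_ALLOC_ANYWHERE
 * @flags: flags to set on the resulting reservation
 *
 * Walk the free memory regions from the top down, and within each
 * region step downwards past existing reservations until an aligned,
 * unreserved block of @size bytes is found and reserved.
 *
 * Return: base address of the allocated block, or 0 on failure
 */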
static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
				    phys_addr_t max_addr, enum lmb_flags flags)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags) < 0)
					return 0;
				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

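/*
 * Typical usage of the allocation API (an illustrative sketch, not code
 * from this file): reserve a 64KiB, 4KiB-aligned scratch buffer below a
 * chosen limit and release it again when done. lmb_alloc_base() returns
 * 0 and logs an error on failure.
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base(0x10000, 0x1000, 0x80000000);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	lmb_free(buf, 0x10000);
 */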
static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
				    enum lmb_flags flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve_flags(base, size, flags) >= 0)
				return base;
		}
	}

	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
{
	return __lmb_alloc_addr(base, size, LMB_NONE);
}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}

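/**
 * lmb_is_reserved_flags() - Test if an address is reserved with given flags
 * @addr: address to check
 * @flags: flags to test for
 *
 * Return: 1 if @addr lies within a reserved region whose flags include
 * all of @flags, else 0. Pass LMB_NONE to test for any reservation.
 */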
int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
			lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

__weak void board_lmb_reserve(void)
{
	/* please define platform-specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(void)
{
	/* please define platform-specific arch_lmb_reserve() */
}

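/**
 * lmb_setup() - Initialise the LMB region lists
 *
 * Initialise the alists backing the free and used memory lists, each
 * with an initial capacity of LMB_ALIST_INITIAL_SIZE regions.
 *
 * Return: 0 on success, -ENOMEM if either list cannot be initialised
 */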
static int lmb_setup(void)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There
 * are two lists, in the form of alist (allocated list) data
 * structures: one for the available memory, and one for the used
 * memory. Initialise the two lists as part of board init. Add memory
 * to the available memory list and reserve common areas by adding
 * them to the used memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup();
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (spl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (spl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
struct lmb *lmb_get(void)
{
	return &lmb;
}

int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup();
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
#endif /* UNIT_TEST */