// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE	0

static void lmb_dump_region(struct lmb_region *rgn, char *name)
{
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.cnt = 0x%lx\n", name, rgn->cnt);

	for (i = 0; i < rgn->cnt; i++) {
		base = rgn->region[i].base;
		size = rgn->region[i].size;
		end = base + size - 1;
		flags = rgn->region[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(struct lmb *lmb)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb->memory, "memory");
	lmb_dump_region(&lmb->reserved, "reserved");
}

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}
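
/*
 * Example (illustrative sketch, not part of the original file): once an
 * lmb has been populated, its memory and reserved regions can be dumped:
 *
 *	lmb_dump_all(&lmb);		// prints only when DEBUG is defined
 *	lmb_dump_all_force(&lmb);	// always prints
 */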

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
		rgn->region[i].flags = rgn->region[i + 1].flags;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void lmb_init(struct lmb *lmb)
{
#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
#elif defined(CONFIG_LMB_MEMORY_REGIONS)
	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
	lmb->memory.region = lmb->memory_regions;
	lmb->reserved.region = lmb->reserved_regions;
#endif
	lmb->memory.cnt = 0;
	lmb->reserved.cnt = 0;
}

void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp down by the caller-supplied margin (typically 4K) to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(lmb, sp, bank_end - sp + 1);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start, gd->mon_len);

		break;
	}
}
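
/*
 * Illustrative sketch (not part of the original file): an architecture's
 * arch_lmb_reserve() hook typically just forwards to the generic helper,
 * passing its current stack pointer, the top of usable RAM and a safety
 * margin.  get_stack_pointer() is a hypothetical stand-in for whatever
 * arch-specific way the caller has of reading the stack pointer.
 *
 *	void arch_lmb_reserve(struct lmb *lmb)
 *	{
 *		ulong sp = get_stack_pointer();	// hypothetical helper
 *
 *		arch_lmb_reserve_generic(lmb, sp, gd->ram_top, 4096);
 *	}
 */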

/**
 * efi_lmb_reserve() - add reservations for EFI memory
 *
 * Add reservations for all EFI memory areas that are not
 * EFI_CONVENTIONAL_MEMORY.
 *
 * @lmb:	lmb environment
 * Return:	0 on success, 1 on failure
 */
static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
{
	struct efi_mem_desc *memmap = NULL, *map;
	efi_uintn_t i, map_size = 0;
	efi_status_t ret;

	ret = efi_get_memory_map_alloc(&map_size, &memmap);
	if (ret != EFI_SUCCESS)
		return 1;

	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
		if (map->type != EFI_CONVENTIONAL_MEMORY)
			lmb_reserve(lmb,
				    map_to_sysmem((void *)(uintptr_t)
						  map->physical_start),
				    map->num_pages * EFI_PAGE_SIZE);
	}
	efi_free_pool(memmap);

	return 0;
}

static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);

	if (CONFIG_IS_ENABLED(EFI_LOADER))
		efi_lmb_reserve(lmb);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
	int i;

	lmb_init(lmb);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size) {
			lmb_add(lmb, bd->bi_dram[i].start,
				bd->bi_dram[i].size);
		}
	}

	lmb_reserve_common(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				phys_size_t size, void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	lmb_reserve_common(lmb, fdt_blob);
}
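
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller sets up an lmb covering all DRAM banks and then allocates from it.
 * The 1 MiB size and 4 KiB alignment below are arbitrary example values.
 *
 *	struct lmb lmb;
 *	phys_addr_t addr;
 *
 *	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *	addr = lmb_alloc(&lmb, 0x100000, 0x1000);
 *	if (!addr)
 *		printf("out of memory\n");
 */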

/* This routine is called with relocation disabled. */
static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		phys_size_t rgnflags = rgn->region[i].flags;

		if (rgnbase == base && rgnsize == size) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* region already exists with different flags */
		}

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		if (rgn->region[i].flags == rgn->region[i + 1].flags) {
			lmb_coalesce_regions(rgn, i, i + 1);
			coalesced++;
		}
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= rgn->max)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
			rgn->region[i + 1].flags = rgn->region[i].flags;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			rgn->region[i + 1].flags = flags;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
	}

	rgn->cnt++;

	return 0;
}

static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
}
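
/*
 * Worked example (illustrative, not part of the original file): starting
 * from an empty list, adding [0x1000, 0x1000 bytes] and then
 * [0x2000, 0x1000 bytes] with the same flags coalesces them into a single
 * entry covering [0x1000, 0x2000 bytes], so rgn->cnt stays at 1.  An
 * adjacent range with different flags is not merged and is stored as its
 * own entry, because regions are only coalesced when their flags match.
 */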

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
				    rgn->region[i].flags);
}
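
/*
 * Example (illustrative, not part of the original file): freeing the middle
 * of an existing reservation splits it in two.  If [0x1000, 0x3000 bytes]
 * is reserved, then
 *
 *	lmb_free(&lmb, 0x2000, 0x1000);
 *
 * leaves two reserved entries, [0x1000, 0x1000 bytes] and
 * [0x3000, 0x1000 bytes], both keeping the original entry's flags.
 */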

long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
		       enum lmb_flags flags)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region_flags(_rgn, base, size, flags);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}
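
/*
 * Example (illustrative, not part of the original file): reserving a range
 * that the OS must not map, assuming the lmb.h flag LMB_NOMAP is available
 * in this configuration:
 *
 *	if (lmb_reserve_flags(&lmb, base, size, LMB_NOMAP) < 0)
 *		printf("reservation failed\n");
 *
 * lmb_reserve() is the common case and is equivalent to passing LMB_NONE.
 */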

static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}
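
/*
 * Example (illustrative, not part of the original file): allocate a 64 KiB,
 * 4 KiB-aligned buffer that must live entirely below 4 GiB, e.g. for a
 * device limited to 32-bit DMA addresses:
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base(&lmb, 0x10000, 0x1000, 0x100000000ULL);
 *	if (!buf)
 *		return -ENOMEM;
 */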

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}
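
/*
 * Example (illustrative, not part of the original file): claim a fixed load
 * address, e.g. before copying an image there, and fall back to a dynamic
 * allocation if that range is already reserved or outside of RAM.
 * load_addr and image_size are hypothetical caller variables.
 *
 *	if (lmb_alloc_addr(&lmb, load_addr, image_size) != load_addr)
 *		load_addr = lmb_alloc(&lmb, image_size, 0x1000);
 */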

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}
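
/*
 * Example (illustrative, not part of the original file): decide whether an
 * image of a given size can be placed at a candidate address without
 * touching anything reserved.  load_addr, image_size and ok_to_load are
 * hypothetical caller variables.
 *
 *	if (lmb_get_free_size(&lmb, load_addr) >= image_size)
 *		ok_to_load = true;
 */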

int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return (lmb->reserved.region[i].flags & flags) == flags;
	}
	return 0;
}

int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
}
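
/*
 * Example (illustrative, not part of the original file): lmb_is_reserved()
 * answers "is this address reserved at all?", while the _flags variant also
 * checks that the covering reservation carries every requested flag:
 *
 *	if (lmb_is_reserved(&lmb, addr))
 *		printf("0x%llx is reserved\n", (unsigned long long)addr);
 */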

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}