// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE	0
#define LMB_ALIST_INITIAL_SIZE	4

static struct lmb lmb;

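/*
 * Print the entries of one LMB list (base, inclusive end, size and
 * flags of each region) under the given name. Used by lmb_dump_all().
 */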
static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

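/*
 * Check whether the ranges [base1, base1 + size1 - 1] and
 * [base2, base2 + size2 - 1] overlap. The comparison uses inclusive
 * end addresses so that regions ending at the top of the address
 * space do not wrap around.
 */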
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

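/*
 * Return 1 if region 2 starts immediately after region 1 ends, -1 if
 * region 1 starts immediately after region 2 ends, and 0 if the two
 * regions are not adjacent.
 */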
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

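/* Remove entry r from the list by copying the following entries down */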
static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("This case should never occur: region 1 ends past region 2\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

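/**
 * arch_lmb_reserve_generic() - reserve the U-Boot stack and image
 * @sp:		current stack pointer
 * @end:	end of the U-Boot area in memory
 * @align:	alignment (in bytes) applied below the stack pointer
 *
 * Generic helper for the per-architecture arch_lmb_reserve()
 * implementations.
 */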
void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp down by the requested alignment to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			   gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve_flags(sp, bank_end - sp + 1, LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve_flags((phys_addr_t)(uintptr_t)_start,
					  gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}
185
Heinrich Schuchardt06d514d2023-01-04 01:36:14 +0100186/**
187 * efi_lmb_reserve() - add reservations for EFI memory
188 *
189 * Add reservations for all EFI memory areas that are not
190 * EFI_CONVENTIONAL_MEMORY.
191 *
Heinrich Schuchardt06d514d2023-01-04 01:36:14 +0100192 * Return: 0 on success, 1 on failure
193 */
Sughosh Ganued17a332024-08-26 17:29:18 +0530194static __maybe_unused int efi_lmb_reserve(void)
Heinrich Schuchardt06d514d2023-01-04 01:36:14 +0100195{
196 struct efi_mem_desc *memmap = NULL, *map;
197 efi_uintn_t i, map_size = 0;
198 efi_status_t ret;
199
200 ret = efi_get_memory_map_alloc(&map_size, &memmap);
201 if (ret != EFI_SUCCESS)
202 return 1;
203
204 for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
Sjoerd Simonsc5279ea2023-01-19 09:38:18 +0100205 if (map->type != EFI_CONVENTIONAL_MEMORY) {
Sughosh Ganued17a332024-08-26 17:29:18 +0530206 lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
Sjoerd Simonsc5279ea2023-01-19 09:38:18 +0100207 map->physical_start),
208 map->num_pages * EFI_PAGE_SIZE,
209 map->type == EFI_RESERVED_MEMORY_TYPE
210 ? LMB_NOMAP : LMB_NONE);
211 }
Heinrich Schuchardt06d514d2023-01-04 01:36:14 +0100212 }
213 efi_free_pool(memmap);
214
215 return 0;
216}
217
static void lmb_reserve_common(void *fdt_blob)
{
	arch_lmb_reserve();
	board_lmb_reserve();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);

	if (CONFIG_IS_ENABLED(EFI_LOADER))
		efi_lmb_reserve();
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct bd_info *bd, void *fdt_blob)
{
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size)
			lmb_add(bd->bi_dram[i].start, bd->bi_dram[i].size);
	}

	lmb_reserve_common(fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(phys_addr_t base, phys_size_t size,
				void *fdt_blob)
{
	lmb_add(base, size);
	lmb_reserve_common(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			   (phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

/**
 * lmb_add_memory() - Add memory range for LMB allocations
 *
 * Add the entire available memory range to the pool of memory that
 * can be used by the LMB module for allocations.
 *
 * Return: None
 */
void lmb_add_memory(void)
{
	int i;
	phys_size_t size;
	phys_addr_t rgn_top;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		if (size) {
			if (bd->bi_dram[i].start > ram_top)
				continue;

			rgn_top = bd->bi_dram[i].start +
				  bd->bi_dram[i].size;

			if (rgn_top > ram_top)
				size -= rgn_top - ram_top;

			lmb_add(bd->bi_dram[i].start, size);
		}
	}
}

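/**
 * lmb_resize_regions() - merge the regions overlapped by a new region
 * @lmb_rgn_lst:	list containing the overlapped regions
 * @idx_start:		index of the first overlapped region
 * @base:		base address of the new region
 * @size:		size of the new region
 *
 * Grow the region at @idx_start so that it covers the new region and
 * every existing region that the new one overlaps, then drop the
 * regions that were merged in.
 *
 * Return: 0 on success, -1 if an overlapped region has flags other
 * than LMB_NONE
 */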
static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First, identify how many regions the requested region
	 * overlaps. If the flags match, combine all the overlapping
	 * regions into a single region, and remove the merged regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}
362
Sughosh Ganued17a332024-08-26 17:29:18 +0530363/**
364 * lmb_add_region_flags() - Add an lmb region to the given list
365 * @lmb_rgn_lst: LMB list to which region is to be added(free/used)
366 * @base: Start address of the region
367 * @size: Size of the region to be added
368 * @flags: Attributes of the LMB region
369 *
370 * Add a region of memory to the list. If the region does not exist, add
371 * it to the list. Depending on the attributes of the region to be added,
372 * the function might resize an already existing region or coalesce two
373 * adjacent regions.
374 *
375 *
376 * Returns: 0 if the region addition successful, -1 on failure
377 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try to coalesce this LMB with another */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* regions with new flags */
		}

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags == LMB_NONE) {
				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
							 size);
				if (ret < 0)
					return -1;

				coalesced++;
				break;
			} else {
				return -1;
			}
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return coalesced;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}
470
Sughosh Ganued17a332024-08-26 17:29:18 +0530471static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
Patrick Delaunay59c0ea52021-05-07 14:50:29 +0200472 phys_size_t size)
473{
Sughosh Ganued17a332024-08-26 17:29:18 +0530474 return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
Patrick Delaunay59c0ea52021-05-07 14:50:29 +0200475}
476
Kumar Gala4ed65522008-02-27 21:51:47 -0600477/* This routine may be called with relocation disabled. */
Sughosh Ganued17a332024-08-26 17:29:18 +0530478long lmb_add(phys_addr_t base, phys_size_t size)
Kumar Gala4ed65522008-02-27 21:51:47 -0600479{
Sughosh Ganued17a332024-08-26 17:29:18 +0530480 struct alist *lmb_rgn_lst = &lmb.free_mem;
Kumar Gala4ed65522008-02-27 21:51:47 -0600481
Sughosh Ganued17a332024-08-26 17:29:18 +0530482 return lmb_add_region(lmb_rgn_lst, base, size);
Kumar Gala4ed65522008-02-27 21:51:47 -0600483}
484
long lmb_free(phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn;
	struct alist *lmb_rgn_lst = &lmb.used_mem;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region to which (base, size) belongs */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}
535
Sughosh Ganued17a332024-08-26 17:29:18 +0530536long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
Patrick Delaunay59c0ea52021-05-07 14:50:29 +0200537{
Sughosh Ganued17a332024-08-26 17:29:18 +0530538 struct alist *lmb_rgn_lst = &lmb.used_mem;
Patrick Delaunay59c0ea52021-05-07 14:50:29 +0200539
Sughosh Ganued17a332024-08-26 17:29:18 +0530540 return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
Andy Fleming63796c42008-06-16 13:58:54 -0500541}
542
Sughosh Ganued17a332024-08-26 17:29:18 +0530543long lmb_reserve(phys_addr_t base, phys_size_t size)
Kumar Gala4ed65522008-02-27 21:51:47 -0600544{
Sughosh Ganued17a332024-08-26 17:29:18 +0530545 return lmb_reserve_flags(base, size, LMB_NONE);
Kumar Gala4ed65522008-02-27 21:51:47 -0600546}
547
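/*
 * Return the index of the first region in the list that overlaps the
 * range [base, base + size - 1], or -1 if there is no overlap.
 */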
static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

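/* Round addr down to a multiple of size; size must be a power of two */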
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

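/*
 * Allocate top-down: walk the free list from the highest region down,
 * place the candidate at the top of the region (capped at max_addr),
 * and step below any reserved range in the way until a free spot is
 * found or the region is exhausted.
 */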
static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
				    phys_addr_t max_addr, enum lmb_flags flags)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags) < 0)
					return 0;
				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
				    enum lmb_flags flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve_flags(base, size, flags) >= 0)
				return base;
		}
	}

	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
{
	return __lmb_alloc_addr(base, size, LMB_NONE);
}

/* Return the number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
				    lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

__weak void board_lmb_reserve(void)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(void)
{
	/* please define platform specific arch_lmb_reserve() */
}

static int lmb_setup(void)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There
 * are two lists, each in the form of an alist (allocated list)
 * structure: one for the available memory and one for the used memory.
 * Initialise the two lists as part of board init. Add memory to the
 * available memory list and reserve common areas by adding them to
 * the used memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup();
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (spl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (spl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

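/*
 * Illustrative usage sketch for callers once lmb_init() has run. This
 * is not code from this file; the size, alignment and address-limit
 * values below are made-up example values:
 *
 *	phys_addr_t addr;
 *
 *	addr = lmb_alloc(0x100000, 0x1000);	// anywhere, 4K aligned
 *	if (!addr)
 *		// handle allocation failure
 *
 *	addr = lmb_alloc_base(0x100000, 0x1000, 0x80000000); // below 2 GiB
 *
 *	lmb_free(addr, 0x100000);		// release the range
 *
 * Both allocators return 0 on failure, so the returned address must be
 * checked before use.
 */
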
#if CONFIG_IS_ENABLED(UNIT_TEST)
struct lmb *lmb_get(void)
{
	return &lmb;
}

int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup();
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
#endif /* UNIT_TEST */