// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <event.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

#define MAP_OP_RESERVE		(u8)0x1
#define MAP_OP_FREE		(u8)0x2
#define MAP_OP_ADD		(u8)0x3

#define LMB_ALLOC_ANYWHERE	0
#define LMB_ALIST_INITIAL_SIZE	4

static struct lmb lmb;

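/*
 * lmb_should_notify() - check whether a map change should be forwarded to
 * the EFI subsystem. Notifications are skipped while the LMB unit tests are
 * running, when the caller passes LMB_NONOTIFY, or when EFI_LOADER is not
 * enabled.
 */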
static bool lmb_should_notify(enum lmb_flags flags)
{
	return !lmb.test && !(flags & LMB_NONOTIFY) &&
		CONFIG_IS_ENABLED(EFI_LOADER);
}

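/*
 * lmb_map_update_notify() - mirror an LMB map change into the EFI memory
 * map. The physical range is rounded out to EFI page granularity and passed
 * to efi_add_memory_map_pg(): reservations become boot-services data, while
 * frees and additions become conventional memory.
 */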
static int __maybe_unused lmb_map_update_notify(phys_addr_t addr,
						phys_size_t size,
						u8 op)
{
	u64 efi_addr;
	u64 pages;
	efi_status_t status;

	if (op != MAP_OP_RESERVE && op != MAP_OP_FREE && op != MAP_OP_ADD) {
		log_err("Invalid map update op received (%d)\n", op);
		return -1;
	}

	efi_addr = (uintptr_t)map_sysmem(addr, 0);
	pages = efi_size_in_pages(size + (efi_addr & EFI_PAGE_MASK));
	efi_addr &= ~EFI_PAGE_MASK;

	status = efi_add_memory_map_pg(efi_addr, pages,
				       op == MAP_OP_RESERVE ?
				       EFI_BOOT_SERVICES_DATA :
				       EFI_CONVENTIONAL_MEMORY,
				       false);
	if (status != EFI_SUCCESS) {
		log_err("%s: LMB Map notify failure %lu\n", __func__,
			status & ~EFI_ERROR_MASK);
		return -1;
	} else {
		return 0;
	}
}

static void lmb_print_region_flags(enum lmb_flags flags)
{
	u64 bitpos;
	const char *flag_str[] = { "none", "no-map", "no-overwrite", "no-notify" };

	do {
		bitpos = flags ? fls(flags) - 1 : 0;
		assert_noisy(bitpos < ARRAY_SIZE(flag_str));
		printf("%s", flag_str[bitpos]);
		flags &= ~(1ull << bitpos);
		puts(flags ? ", " : "\n");
	} while (flags);
}

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: ",
		       name, i, base, end, size);
		lmb_print_region_flags(flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

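/*
 * lmb_addrs_overlap() - return non-zero if [base1, base1 + size1) and
 * [base2, base2 + size2) share at least one byte.
 * lmb_addrs_adjacent() - return 1 if region 2 starts right after region 1,
 * -1 if region 1 starts right after region 2, and 0 otherwise.
 */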
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

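/*
 * lmb_remove_region() - drop entry @r from the list by shifting all
 * following entries down one slot and decrementing the count.
 */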
static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("This should never be the case\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

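/*
 * lmb_reserve_uboot_region() - mark the RAM used by the running U-Boot image
 * and its stack as no-overwrite. The reservation starts 16 KiB below the
 * bottom of the configured stack (gd->start_addr_sp - CONFIG_STACK_SIZE) and
 * extends to the end of the DRAM bank containing it, capped at gd->ram_top.
 */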
static void lmb_reserve_uboot_region(void)
{
	int bank;
	ulong end, bank_end;
	phys_addr_t rsv_start;

	rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
	end = gd->ram_top;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of RAM area to prevent LMB from overwriting that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);

	/* adjust sp by 16K to be safe */
	rsv_start -= SZ_16K;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    rsv_start < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (rsv_start > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve_flags(rsv_start, bank_end - rsv_start + 1,
				  LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve_flags((phys_addr_t)(uintptr_t)_start,
					  gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}

static void lmb_reserve_common(void *fdt_blob)
{
	lmb_reserve_uboot_region();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			(phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

/**
 * lmb_add_memory() - Add memory range for LMB allocations
 *
 * Add the entire available memory range to the pool of memory that
 * can be used by the LMB module for allocations.
 *
 * Return: None
 */
void lmb_add_memory(void)
{
	int i;
	phys_size_t size;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	if (CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP))
		return lmb_arch_add_memory();

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		if (size) {
			lmb_add(bd->bi_dram[i].start, size);

			/*
			 * Reserve memory above ram_top as
			 * no-overwrite so that it cannot be
			 * allocated
			 */
			if (bd->bi_dram[i].start >= ram_top)
				lmb_reserve_flags(bd->bi_dram[i].start, size,
						  LMB_NOOVERWRITE);
		}
	}
}

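/*
 * lmb_resize_regions() - grow the region at @idx_start so that it covers
 * [base, base + size) as well as every existing region the new range
 * overlaps, then delete the regions that were absorbed. Fails (returns -1)
 * if any overlapped region carries flags other than LMB_NONE.
 */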
static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not exist, add
 * it to the list. Depending on the attributes of the region to be added,
 * the function might resize an already existing region or coalesce two
 * adjacent regions.
 *
 * Return: 0 if the region addition is successful, -1 on failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* regions with new flags */
		}

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags == LMB_NONE) {
				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
							 size);
				if (ret < 0)
					return -1;

				coalesced++;
				break;
			} else {
				return -1;
			}
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return 0;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}

static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	long ret;
	struct alist *lmb_rgn_lst = &lmb.free_mem;

	ret = lmb_add_region(lmb_rgn_lst, base, size);
	if (ret)
		return ret;

	if (lmb_should_notify(LMB_NONE))
		return lmb_map_update_notify(base, size, MAP_OP_ADD);

	return 0;
}

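/*
 * _lmb_free() - remove [base, base + size) from the used-memory list.
 * The range must lie entirely within a single reserved region; that region
 * is then deleted, trimmed at the front or back, or split in two around
 * the freed hole.
 */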
static long _lmb_free(phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn;
	struct alist *lmb_rgn_lst = &lmb.used_mem;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region where (base, size) belongs */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}

/**
 * lmb_free_flags() - Free up a region of memory
 * @base: Base Address of region to be freed
 * @size: Size of the region to be freed
 * @flags: Memory region attributes
 *
 * Free up a region of memory.
 *
 * Return: 0 if successful, -1 on failure
 */
long lmb_free_flags(phys_addr_t base, phys_size_t size,
		    uint flags)
{
	long ret;

	ret = _lmb_free(base, size);
	if (ret < 0)
		return ret;

	if (lmb_should_notify(flags))
		return lmb_map_update_notify(base, size, MAP_OP_FREE);

	return ret;
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	return lmb_free_flags(base, size, LMB_NONE);
}

long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
	long ret = 0;
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
	if (ret)
		return ret;

	if (lmb_should_notify(flags))
		return lmb_map_update_notify(base, size, MAP_OP_RESERVE);

	return ret;
}

long lmb_reserve(phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(base, size, LMB_NONE);
}

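/*
 * lmb_overlaps_region() - return the index of the first region in the list
 * that overlaps [base, base + size), or -1 if the range touches no region.
 */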
static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

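/*
 * _lmb_alloc_base() - back-end for the allocation routines. Walk the free
 * memory regions from the highest downwards, pick the highest aligned
 * address that fits below @max_addr (or anywhere for LMB_ALLOC_ANYWHERE),
 * and step below any reserved region that is in the way. On success the
 * range is added to the used-memory list and, if required, the EFI memory
 * map is notified.
 */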
static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
				   phys_addr_t max_addr, enum lmb_flags flags)
{
	u8 op;
	int ret;
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags))
					return 0;

				if (lmb_should_notify(flags)) {
					op = MAP_OP_RESERVE;
					ret = lmb_map_update_notify(base, size,
								    op);
					if (ret)
						return ret;
				}

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

/**
 * lmb_alloc_flags() - Allocate memory region with specified attributes
 * @size: Size of the region requested
 * @align: Alignment of the memory region requested
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_flags(phys_size_t size, ulong align, uint flags)
{
	return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE,
			       flags);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = _lmb_alloc_base(size, align, max_addr, LMB_NONE);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

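/*
 * Illustrative usage (not taken from any in-tree caller): a boot-time
 * consumer that needs a 1 MiB, 4 KiB-aligned scratch buffer below 2 GiB
 * could do:
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base(SZ_1M, SZ_4K, SZ_2G);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	lmb_free(buf, SZ_1M);
 */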
/**
 * lmb_alloc_base_flags() - Allocate specified memory region with specified attributes
 * @size: Size of the region requested
 * @align: Alignment of the memory region requested
 * @max_addr: Maximum address of the requested region
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter. The max_addr parameter is used to specify the maximum address
 * below which the requested region should be allocated.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_base_flags(phys_size_t size, ulong align,
				 phys_addr_t max_addr, uint flags)
{
	phys_addr_t alloc;

	alloc = _lmb_alloc_base(size, align, max_addr, flags);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

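/*
 * _lmb_alloc_addr() - try to reserve exactly [base, base + size). The range
 * must fall entirely inside one free-memory region; if so it is reserved
 * with the given flags and @base is returned, otherwise 0.
 */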
static phys_addr_t _lmb_alloc_addr(phys_addr_t base, phys_size_t size,
				   enum lmb_flags flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve_flags(base, size, flags) >= 0)
				return base;
		}
	}

	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
{
	return _lmb_alloc_addr(base, size, LMB_NONE);
}

/**
 * lmb_alloc_addr_flags() - Allocate specified memory address with specified attributes
 * @base: Base Address requested
 * @size: Size of the region requested
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter. The base parameter is used to specify the base address
 * of the requested region.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_addr_flags(phys_addr_t base, phys_size_t size,
				 uint flags)
{
	return _lmb_alloc_addr(base, size, flags);
}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
			lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

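/*
 * lmb_setup() - allocate the two region lists (free and used memory) and
 * record whether the module is running under the unit tests, which
 * suppresses EFI map notifications.
 */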
static int lmb_setup(bool test)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	lmb.test = test;

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There
 * are two lists, in the form of the alloced list (alist) data structure:
 * one for the available memory and one for the used memory. Initialise
 * the two lists as part of board init. Add memory to the available memory
 * list and reserve common areas by adding them to the used memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup(false);
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (xpl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (xpl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

struct lmb *lmb_get(void)
{
	return &lmb;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
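/*
 * lmb_push() / lmb_pop() - save the live LMB state and replace it with a
 * fresh one for a unit test, then restore the saved state afterwards.
 */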
int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup(true);
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
#endif /* UNIT_TEST */