// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE 0
#define LMB_ALIST_INITIAL_SIZE 4

static struct lmb lmb;

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
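
/*
 * Worked example of the adjacency convention above (made-up values):
 *
 *	lmb_addrs_adjacent(0x1000, 0x100, 0x1100, 0x100) returns 1
 *		(region 2 starts right where region 1 ends)
 *	lmb_addrs_adjacent(0x1100, 0x100, 0x1000, 0x100) returns -1
 *		(region 1 starts right where region 2 ends)
 *	lmb_addrs_adjacent(0x1000, 0x100, 0x2000, 0x100) returns 0
 *		(the regions are not adjacent)
 */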

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("Unexpected case: region 2 is contained within region 1\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp by 'align' bytes (typically 4K) to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(sp, bank_end - sp + 1);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve((phys_addr_t)(uintptr_t)_start, gd->mon_len);

		break;
	}
}
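
/*
 * Illustrative sketch (an assumption, not code from any particular arch):
 * an architecture's arch_lmb_reserve() would typically pass its current
 * stack pointer and the end of usable RAM to the generic helper, e.g.:
 *
 *	void arch_lmb_reserve(void)
 *	{
 *		arch_lmb_reserve_generic(gd->start_addr_sp, gd->ram_top, 4096);
 *	}
 */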

/**
 * efi_lmb_reserve() - add reservations for EFI memory
 *
 * Add reservations for all EFI memory areas that are not
 * EFI_CONVENTIONAL_MEMORY.
 *
 * Return: 0 on success, 1 on failure
 */
static __maybe_unused int efi_lmb_reserve(void)
{
	struct efi_mem_desc *memmap = NULL, *map;
	efi_uintn_t i, map_size = 0;
	efi_status_t ret;

	ret = efi_get_memory_map_alloc(&map_size, &memmap);
	if (ret != EFI_SUCCESS)
		return 1;

	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
		if (map->type != EFI_CONVENTIONAL_MEMORY) {
			lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
							map->physical_start),
					  map->num_pages * EFI_PAGE_SIZE,
					  map->type == EFI_RESERVED_MEMORY_TYPE
					  ? LMB_NOMAP : LMB_NONE);
		}
	}
	efi_free_pool(memmap);

	return 0;
}

static void lmb_reserve_common(void *fdt_blob)
{
	arch_lmb_reserve();
	board_lmb_reserve();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);

	if (CONFIG_IS_ENABLED(EFI_LOADER))
		efi_lmb_reserve();
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct bd_info *bd, void *fdt_blob)
{
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size)
			lmb_add(bd->bi_dram[i].start, bd->bi_dram[i].size);
	}

	lmb_reserve_common(fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(phys_addr_t base, phys_size_t size,
				void *fdt_blob)
{
	lmb_add(base, size);
	lmb_reserve_common(fdt_blob);
}
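
/*
 * Illustrative sketch (an assumption, not taken from a real board): a
 * caller that wants LMB to manage a single fixed window of RAM, using a
 * made-up base/size pair, would do:
 *
 *	lmb_init_and_reserve_range(0x80000000, 0x10000000,
 *				   (void *)gd->fdt_blob);
 *
 * after which allocations via lmb_alloc() stay inside that window and
 * avoid the arch/board/FDT reservations made above.
 */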

/**
 * lmb_add_memory() - Add memory range for LMB allocations
 *
 * Add the entire available memory range to the pool of memory that
 * can be used by the LMB module for allocations.
 *
 * Return: None
 */
void lmb_add_memory(void)
{
	int i;
	phys_size_t size;
	phys_addr_t rgn_top;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		if (size) {
			if (bd->bi_dram[i].start > ram_top)
				continue;

			rgn_top = bd->bi_dram[i].start +
				bd->bi_dram[i].size;

			if (rgn_top > ram_top)
				size -= rgn_top - ram_top;

			lmb_add(bd->bi_dram[i].start, size);
		}
	}
}
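
/*
 * Worked example of the ram_top clamping above (made-up numbers): with a
 * bank at base 0xc0000000 of size 0x80000000 and ram_top at 0x100000000,
 * rgn_top is 0x140000000, so the added size is reduced by 0x40000000 and
 * only [0xc0000000, 0xffffffff] ends up in the free list:
 *
 *	size = 0x80000000 - (0x140000000 - 0x100000000) = 0x40000000
 *	lmb_add(0xc0000000, 0x40000000);
 */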

static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}
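
/*
 * Worked example of the merge above (made-up numbers): with LMB_NONE
 * regions [0x1000, 0x1fff] and [0x3000, 0x3fff] in the list, resizing
 * for a request base=0x1800, size=0x2000 overlaps both, so they collapse
 * into a single region:
 *
 *	mergebase = min(0x1800, 0x1000) = 0x1000
 *	mergeend  = max(0x4000, 0x1800 + 0x2000) = 0x4000
 *
 * leaving one region [0x1000, 0x3fff] at idx_start.
 */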

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. Depending on the attributes of the
 * region to be added, the function might resize an already existing
 * region or coalesce two adjacent regions.
 *
 * Return: 0 if the region addition was successful, -1 on failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* regions with new flags */
		}

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags == LMB_NONE) {
				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
							 size);
				if (ret < 0)
					return -1;

				coalesced++;
				break;
			} else {
				return -1;
			}
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return coalesced;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}

static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	struct alist *lmb_rgn_lst = &lmb.free_mem;

	return lmb_add_region(lmb_rgn_lst, base, size);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn;
	struct alist *lmb_rgn_lst = &lmb.used_mem;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region that (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add a region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}
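
/*
 * Worked example of the split case above (made-up numbers): freeing
 * [0x2000, 0x2fff] out of a reservation [0x1000, 0x4fff] trims the
 * original entry to [0x1000, 0x1fff] and re-adds [0x3000, 0x4fff] with
 * the same flags:
 *
 *	lmb_reserve(0x1000, 0x4000);
 *	lmb_free(0x2000, 0x1000);
 */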

long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
}

long lmb_reserve(phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(base, size, LMB_NONE);
}
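
/*
 * Illustrative sketch (an assumption, addresses made up): a board that
 * must keep allocations out of a firmware carveout could reserve it with
 * LMB_NOMAP so the range is also skipped when setting up the MMU:
 *
 *	if (lmb_reserve_flags(0x0e000000, 0x02000000, LMB_NOMAP) < 0)
 *		printf("carveout reservation failed\n");
 */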

static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
				    phys_addr_t max_addr, enum lmb_flags flags)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags) < 0)
					return 0;
				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}
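
/*
 * Illustrative sketch (an assumption, numbers made up): grab a 1MiB,
 * 4KiB-aligned bounce buffer that a 32-bit DMA master can reach, and
 * release it when done:
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base(0x100000, 0x1000, 0x100000000ULL);
 *	if (buf) {
 *		... use the buffer ...
 *		lmb_free(buf, 0x100000);
 *	}
 *
 * Allocation is top-down: the helper above walks the free regions from
 * the highest downwards, so the buffer lands as high as possible below
 * the limit.
 */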

static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
				    enum lmb_flags flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve_flags(base, size, flags) >= 0)
				return base;
		}
	}

	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
{
	return __lmb_alloc_addr(base, size, LMB_NONE);
}
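
/*
 * Illustrative sketch (an assumption, address made up): claim the exact
 * load address of an image before copying it there, so later LMB
 * allocations cannot hand out the same range. lmb_alloc_addr() returns
 * the base on success and 0 on failure:
 *
 *	if (lmb_alloc_addr(0x82000000, img_size) != 0x82000000)
 *		return -ENOSPC;
 */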

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}
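
/*
 * Illustrative sketch (an assumption): before relocating a flat device
 * tree to a candidate address, check that enough free space follows it,
 * and fall back to a fresh allocation otherwise:
 *
 *	if (lmb_get_free_size(fdt_addr) < fdt_totalsize(fdt))
 *		fdt_addr = lmb_alloc(fdt_totalsize(fdt), 0x1000);
 */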

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
				    lmb_used[i].size - 1;
		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

__weak void board_lmb_reserve(void)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(void)
{
	/* please define platform specific arch_lmb_reserve() */
}

static int lmb_setup(void)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There are
 * two lists, each in the form of an alloced list (alist) data structure:
 * one for the available memory and one for the used memory. Initialise
 * the two lists as part of board init, add memory to the available-memory
 * list, and reserve common areas by adding them to the used-memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup();
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	return 0;
}
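
/*
 * Illustrative call sequence (a sketch of how the pieces above fit
 * together, not a copy of any board file): lmb_init() creates the two
 * alists and seeds the free list via lmb_add_memory(); afterwards
 * callers reserve and allocate as needed:
 *
 *	if (lmb_init())
 *		hang();
 *	lmb_reserve(gd->relocaddr, gd->mon_len);
 *	addr = lmb_alloc(0x2000, 0x1000);
 */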

#if CONFIG_IS_ENABLED(UNIT_TEST)
struct lmb *lmb_get(void)
{
	return &lmb;
}

int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup();
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
#endif /* UNIT_TEST */
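
/*
 * Illustrative test pattern (a sketch; ut_assertok() and the addresses
 * are assumptions): a unit test can swap in a scratch memory map with
 * lmb_push() and restore the real one with lmb_pop():
 *
 *	struct lmb store;
 *
 *	ut_assertok(lmb_push(&store));
 *	lmb_add(0x40000000, 0x10000000);
 *	... exercise lmb_alloc() / lmb_free() ...
 *	lmb_pop(&store);
 */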