// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE	0
#define LMB_ALIST_INITIAL_SIZE	4

static struct lmb lmb;

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}
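
/*
 * Worked example (illustrative values only): [0x1000, 0x1fff] and
 * [0x1800, 0x27ff] overlap because 0x1000 <= 0x27ff and 0x1800 <= 0x1fff;
 * [0x1000, 0x1fff] and [0x2000, 0x2fff] do not, since 0x2000 > 0x1fff.
 * The -1 on each end keeps the bounds inclusive and avoids overflow when a
 * region touches the top of the address space.
 */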

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("This case should never occur: region 1 ends beyond region 2\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp by 'align' bytes to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(sp, bank_end - sp + 1);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve((phys_addr_t)(uintptr_t)_start, gd->mon_len);

		break;
	}
}
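
/*
 * A minimal sketch of how an architecture hook might use the generic helper
 * (the get_sp() helper and the 4 KiB alignment here are assumptions for
 * illustration, not part of this file):
 *
 *	void arch_lmb_reserve(void)
 *	{
 *		arch_lmb_reserve_generic(get_sp(), gd->ram_top, 4096);
 *	}
 */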

/**
 * efi_lmb_reserve() - add reservations for EFI memory
 *
 * Add reservations for all EFI memory areas that are not
 * EFI_CONVENTIONAL_MEMORY.
 *
 * Return:	0 on success, 1 on failure
 */
static __maybe_unused int efi_lmb_reserve(void)
{
	struct efi_mem_desc *memmap = NULL, *map;
	efi_uintn_t i, map_size = 0;
	efi_status_t ret;

	ret = efi_get_memory_map_alloc(&map_size, &memmap);
	if (ret != EFI_SUCCESS)
		return 1;

	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
		if (map->type != EFI_CONVENTIONAL_MEMORY) {
			lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
							map->physical_start),
					  map->num_pages * EFI_PAGE_SIZE,
					  map->type == EFI_RESERVED_MEMORY_TYPE
					  ? LMB_NOMAP : LMB_NONE);
		}
	}
	efi_free_pool(memmap);

	return 0;
}

static void lmb_reserve_common(void *fdt_blob)
{
	arch_lmb_reserve();
	board_lmb_reserve();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);

	if (CONFIG_IS_ENABLED(EFI_LOADER))
		efi_lmb_reserve();
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct bd_info *bd, void *fdt_blob)
{
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size)
			lmb_add(bd->bi_dram[i].start, bd->bi_dram[i].size);
	}

	lmb_reserve_common(fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(phys_addr_t base, phys_size_t size,
				void *fdt_blob)
{
	lmb_add(base, size);
	lmb_reserve_common(fdt_blob);
}
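
/*
 * Typical setup after DRAM init might look like the following (a sketch;
 * exactly where this is called is board/boot-flow specific):
 *
 *	lmb_init_and_reserve(gd->bd, (void *)gd->fdt_blob);
 *
 * or, for a single known range:
 *
 *	lmb_init_and_reserve_range(base, size, NULL);
 */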

static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}
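
/*
 * Worked example (illustrative values only): with existing LMB_NONE regions
 * [0x1000, 0x1fff] and [0x3000, 0x3fff], a request for [0x1800, 0x37ff]
 * overlaps both, so they merge into a single region [0x1000, 0x3fff] and
 * the second entry is removed. Any overlapped region carrying other flags
 * makes the resize fail with -1.
 */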

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not exist, add
 * it to the list. Depending on the attributes of the region to be added,
 * the function might resize an already existing region or coalesce two
 * adjacent regions.
 *
 * Returns: 0 if the region was added, the number of coalesced regions if it
 * was merged with existing ones, -1 on failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* regions with new flags */
		}

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags == LMB_NONE) {
				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
							 size);
				if (ret < 0)
					return -1;

				coalesced++;
				break;
			} else {
				return -1;
			}
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return coalesced;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}
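
/*
 * Behaviour sketch (illustrative values only): adding [0x2000, 0x2fff] to a
 * list that already holds [0x1000, 0x1fff] with the same flags extends that
 * entry to [0x1000, 0x2fff]; adding it with different flags leaves two
 * separate entries, since regions only coalesce when their flags match.
 */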

static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	struct alist *lmb_rgn_lst = &lmb.free_mem;

	return lmb_add_region(lmb_rgn_lst, base, size);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn;
	struct alist *lmb_rgn_lst = &lmb.used_mem;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region that (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add a region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}

long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
}

long lmb_reserve(phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(base, size, LMB_NONE);
}
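
/*
 * For example, a board hook might carve out a framebuffer before the
 * allocator can hand it out (address and size are made-up values):
 *
 *	lmb_reserve(0x8f000000, SZ_8M);
 *
 * while an LMB_NOMAP reservation additionally marks the range as one that
 * should not appear as mapped memory (e.g. in the EFI memory map):
 *
 *	lmb_reserve_flags(0x8f000000, SZ_8M, LMB_NOMAP);
 */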

static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
				    phys_addr_t max_addr, enum lmb_flags flags)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags) < 0)
					return 0;
				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}
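
/*
 * The search above is top-down: starting from the highest free region, a
 * candidate is aligned down and then retried below each reservation that
 * overlaps it. Worked example (illustrative values only): with free memory
 * [0x0, 0xffffff], a reservation at [0xf00000, 0xffffff] and a request for
 * 0x1000 bytes aligned to 0x1000, the first candidate 0xfff000 overlaps the
 * reservation, so the next try is 0xf00000 - 0x1000 = 0xeff000, which is
 * free and gets reserved.
 */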
550
Sughosh Ganued17a332024-08-26 17:29:18 +0530551phys_addr_t lmb_alloc(phys_size_t size, ulong align)
Sughosh Ganu3d679ae2024-08-26 17:29:16 +0530552{
Sughosh Ganued17a332024-08-26 17:29:18 +0530553 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
Sughosh Ganu3d679ae2024-08-26 17:29:16 +0530554}
555
Sughosh Ganued17a332024-08-26 17:29:18 +0530556phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
Sughosh Ganu3d679ae2024-08-26 17:29:16 +0530557{
558 phys_addr_t alloc;
559
Sughosh Ganu5e9553c2024-08-26 17:29:19 +0530560 alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
Sughosh Ganu3d679ae2024-08-26 17:29:16 +0530561
562 if (alloc == 0)
563 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
564 (ulong)size, (ulong)max_addr);
565
566 return alloc;
567}
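
/*
 * A caller might grab a bounce buffer below 4 GiB like this (a sketch; the
 * size, alignment and limit are arbitrary examples):
 *
 *	phys_addr_t buf = lmb_alloc_base(SZ_1M, SZ_4K, SZ_4G - 1);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */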
568
Sughosh Ganu5e9553c2024-08-26 17:29:19 +0530569static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
570 enum lmb_flags flags)
Simon Goldschmidt4cc8af82019-01-14 22:38:18 +0100571{
Simon Goldschmidte35d2a72019-01-21 20:29:56 +0100572 long rgn;
Sughosh Ganued17a332024-08-26 17:29:18 +0530573 struct lmb_region *lmb_memory = lmb.free_mem.data;
Simon Goldschmidt4cc8af82019-01-14 22:38:18 +0100574
575 /* Check if the requested address is in one of the memory regions */
Sughosh Ganued17a332024-08-26 17:29:18 +0530576 rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
Simon Goldschmidte35d2a72019-01-21 20:29:56 +0100577 if (rgn >= 0) {
Simon Goldschmidt4cc8af82019-01-14 22:38:18 +0100578 /*
579 * Check if the requested end address is in the same memory
580 * region we found.
581 */
Sughosh Ganued17a332024-08-26 17:29:18 +0530582 if (lmb_addrs_overlap(lmb_memory[rgn].base,
583 lmb_memory[rgn].size,
Simon Goldschmidte35d2a72019-01-21 20:29:56 +0100584 base + size - 1, 1)) {
Simon Goldschmidt4cc8af82019-01-14 22:38:18 +0100585 /* ok, reserve the memory */
Sughosh Ganu5e9553c2024-08-26 17:29:19 +0530586 if (lmb_reserve_flags(base, size, flags) >= 0)
Simon Goldschmidt4cc8af82019-01-14 22:38:18 +0100587 return base;
588 }
589 }
Sughosh Ganu5e9553c2024-08-26 17:29:19 +0530590
Simon Goldschmidt4cc8af82019-01-14 22:38:18 +0100591 return 0;
592}
593
Sughosh Ganu5e9553c2024-08-26 17:29:19 +0530594/*
595 * Try to allocate a specific address range: must be in defined memory but not
596 * reserved
597 */
598phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
599{
600 return __lmb_alloc_addr(base, size, LMB_NONE);
601}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}
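
/*
 * For example (illustrative values only): with memory [0x0, 0xfffffff] and a
 * single reservation at [0x8000000, 0x8ffffff], lmb_get_free_size(0x7000000)
 * returns 0x1000000 (the gap up to the reservation), while an address inside
 * the reservation returns 0.
 */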

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
			lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}
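
/*
 * Callers typically probe a single address, e.g. (a sketch):
 *
 *	if (lmb_is_reserved_flags(addr, LMB_NOMAP))
 *		return;	// skip mapping this range
 *
 * Passing LMB_NONE (0) simply tests whether the address lies in any reserved
 * region, since (flags & 0) == 0 always holds.
 */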

__weak void board_lmb_reserve(void)
{
	/* please define a platform-specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(void)
{
	/* please define a platform-specific arch_lmb_reserve() */
}

static int lmb_setup(void)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There are
 * two lists, each in the form of an alist (allocated list) data structure:
 * one for the available memory, and one for the used memory. Initialise
 * the two lists as part of board init. Add memory to the available memory
 * list and reserve common areas by adding them to the used memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup();
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	return 0;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
struct lmb *lmb_get(void)
{
	return &lmb;
}

int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup();
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
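
/*
 * A test might save and restore the global LMB state like this (a sketch of
 * how a unit test could use the push/pop pair):
 *
 *	struct lmb store;
 *
 *	lmb_push(&store);	// start from fresh, empty lists
 *	// ...exercise lmb_add()/lmb_reserve()/lmb_alloc()...
 *	lmb_pop(&store);	// put the original state back
 */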
#endif /* UNIT_TEST */