// SPDX-License-Identifier: GPL-2.0+
/*
 * This code is based on a version (aka dlmalloc) of malloc/free/realloc written
 * by Doug Lea and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 *
 * The original code is available at http://gee.cs.oswego.edu/pub/misc/
 * as file malloc-2.6.6.c.
 */

#if CONFIG_IS_ENABLED(UNIT_TEST)
#define DEBUG
#endif

#include <common.h>
#include <log.h>
#include <asm/global_data.h>

#include <malloc.h>
#include <asm/io.h>
#include <valgrind/memcheck.h>

#ifdef DEBUG
#if __STD_C
static void malloc_update_mallinfo (void);
void malloc_stats (void);
#else
static void malloc_update_mallinfo ();
void malloc_stats();
#endif
#endif	/* DEBUG */

DECLARE_GLOBAL_DATA_PTR;

/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup (void)
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
				    gNextAddress - gAddressBase,
				    MEM_DECOMMIT);
		assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;
}

void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
							    MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
			   gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (!new_address)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
								    MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
					    (size + gNextAddress -
					     AlignPage (gNextAddress)),
					    MEM_COMMIT, PAGE_READWRITE);
			if (!res)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
				     MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
				     MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif


/*
  Type declarations
*/


struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
} __attribute__((__may_alias__)) ;

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |

nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
        trailing size field since there is no
        next contiguous chunk that would have to index off it. (After
        initialization, `top' is forced to always exist.  If it would
        become less than MINSIZE bytes long, it is replenished via
        malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields.  Because they are
        never merged or traversed from any other chunk, they have no
        foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, is used only if no other chunk is
       available, and is released back to the system if it is very
       large (see M_TRIM_THRESHOLD).

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

    * Implicitly, through the host system's memory mapping tables.
       If supported, requests greater than a threshold are usually
       serviced via calls to mmap, and then later released via munmap.

*/

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
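
/*
 * Worked example (illustrative only; assumes a 32-bit build where
 * INTERNAL_SIZE_T is 4 bytes, so SIZE_SZ = 4, MALLOC_ALIGNMENT = 8,
 * MALLOC_ALIGN_MASK = 7 and MINSIZE = 16):
 *
 *   request2size(10) -> 10 + 4 + 7 = 21 < MINSIZE + 7, so MINSIZE = 16
 *   request2size(25) -> (25 + 4 + 7) & ~7 = 32
 *
 * Each request pays SIZE_SZ bytes of header overhead and is rounded up
 * to a multiple of MALLOC_ALIGNMENT, with MINSIZE as the floor.
 */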


/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
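
/*
 * Sketch of how the boundary-tag macros compose (hypothetical helper,
 * not part of this allocator): walking forward over physically
 * adjacent chunks.
 *
 *   mchunkptr p = first_chunk_of_arena;   // assumed starting point
 *   while ((char *)p < arena_end)
 *           p = next_chunk(p);            // p + (p->size & ~PREV_INUSE)
 *
 * Note that prev_chunk(p) is only meaningful when !prev_inuse(p):
 * while the previous chunk is allocated, it owns the memory that
 * would otherwise hold p->prev_size.
 */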


/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))


/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))


/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (av_[2])          /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to check in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

static mbinptr av_[NAV * 2 + 2] = {
 NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void);
#endif

ulong mem_malloc_start = 0;
ulong mem_malloc_end = 0;
ulong mem_malloc_brk = 0;

static bool malloc_testing;	/* enable test mode */
static int malloc_max_allocs;	/* return NULL after this many calls to malloc() */

void *sbrk(ptrdiff_t increment)
{
	ulong old = mem_malloc_brk;
	ulong new = old + increment;

	/*
	 * if we are giving memory back make sure we clear it out since
	 * we set MORECORE_CLEARS to 1
	 */
	if (increment < 0)
		memset((void *)new, 0, -increment);

	if ((new < mem_malloc_start) || (new > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new;

	return (void *)old;
}
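
/*
 * Brk arithmetic sketch (addresses are hypothetical): after
 * mem_malloc_init(0x80000000, 0x100000), mem_malloc_brk is 0x80000000.
 *
 *   sbrk(0x1000)  -> returns 0x80000000, brk becomes 0x80001000
 *   sbrk(-0x800)  -> zeroes 0x80000800..0x80000fff (MORECORE_CLEARS)
 *                    and moves the brk back to 0x80000800
 *   an increment that would cross mem_malloc_end returns MORECORE_FAILURE
 */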

void mem_malloc_init(ulong start, ulong size)
{
	mem_malloc_start = start;
	mem_malloc_end = start + size;
	mem_malloc_brk = start;

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
	malloc_init();
#endif

	debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
	      mem_malloc_end);
#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
	memset((void *)mem_malloc_start, 0x0, size);
#endif
}

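/*
 * Typical boot-time usage (sketch; the actual reserved range comes from
 * the board code and the names below are only illustrative):
 *
 *   ulong malloc_base = ...;              // carved out of RAM at init
 *   mem_malloc_init(malloc_base, SZ_1M);
 *   void *p = malloc(64);                 // now served from that range
 */
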
/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                         \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3):\
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6):\
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9):\
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12):\
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15):\
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18):\
					  126)
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
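
/*
 * Worked examples of the indexing (illustrative):
 *
 *   bin_index(40)    -> 40 >> 9 == 0,     bin = 40 >> 3             =   5
 *   bin_index(520)   -> 520 >> 9 == 1,    bin = 56 + (520 >> 6)     =  64
 *   bin_index(40960) -> 40960 >> 9 == 80, bin = 110 + (40960 >> 12) = 120
 *
 * Small sizes map linearly, larger ones logarithmically, matching the
 * bin layout table above.
 */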


/*
   To help compensate for the large number of bins, a one-level index
   structure is used for bin-by-bin searching.  `binblocks' is a
   one-word bitvector recording whether groups of BINBLOCKWIDTH bins
   have any (possibly) non-empty bins, so they can be skipped over
   all at once during traversals.  The bits are NOT always
   cleared as soon as all bins in a block are empty, but instead only
   when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks_r     ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
#define binblocks_w     (av_[1])

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
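
/*
 * Example (illustrative): with BINBLOCKWIDTH == 4, bin 64 belongs to
 * block 64 / 4 == 16, so idx2binblock(64) == 1 << 16.  mark_binblock(64)
 * sets that bit in av_[1]; the scan loop in mALLOc() skips any block
 * whose bit is clear, i.e. four bins at a time.
 */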



/* Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/* Tracking mmaps */

#ifdef DEBUG
static unsigned int n_mmaps = 0;
#endif	/* DEBUG */
static unsigned long mmapped_mem = 0;
#if HAVE_MMAP
static unsigned int max_n_mmaps = 0;
static unsigned long max_mmapped_mem = 0;
#endif

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void)
{
	int i, j;

	debug("bins (av_ array) are at %p\n", (void *)av_);

	av_[0] = NULL; av_[1] = NULL;
	for (i = 2, j = 2; i < NAV * 2 + 2; i += 2, j++) {
		av_[i] = bin_at(j - 2);
		av_[i + 1] = bin_at(j - 2);

		/* Just print the first few bins so that
		 * we can see they are all right.
		 */
		if (i < 10)
			debug("av_[%d]=%lx av_[%d]=%lx\n",
			      i, (ulong)av_[i],
			      i + 1, (ulong)av_[i + 1]);
	}

	/* Init the static bookkeeping as well */
	sbrk_base = (char *)(-1);
	max_sbrked_mem = 0;
	max_total_mem = 0;
#ifdef DEBUG
	memset((void *)&current_mallinfo, 0, sizeof(struct mallinfo));
#endif
}
#endif

/*
  Debugging support
*/

#ifdef DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif



/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk = P;                                \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)
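
/*
 * Usage sketch: these statement macros appear verbatim in mALLOc() and
 * fREe() below.  BK and FD are caller-supplied scratch variables, which
 * is why only plain variables may be passed in:
 *
 *   mchunkptr bck, fwd;
 *   int idx;
 *   frontlink(p, sz, idx, bck, fwd);   // insert free chunk p into a bin
 *   unlink(p, bck, fwd);               // take it off its list again
 */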



/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */

/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
#endif
{
  char*     brk;                  /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  char*     new_brk;              /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;        /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long pagesz = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if an intervening foreign sbrk call.) */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE (sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
    return;

  sbrked_mem += sbrk_size;

  if (brk == old_end) /* can just add bytes to current top */
  {
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1)) /* First time through. Record base */
      sbrk_base = brk;
    else /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */

    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));

    /* Allocate correction */
    new_brk = (char*)(MORECORE (correction));
    if (new_brk == (char*)(MORECORE_FAILURE)) return;

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {

      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
	set_head(top, PREV_INUSE); /* will force null return from malloc */
	return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head_size(old_top, old_top_size);
      chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
      /* If possible, release the rest. */
      if (old_top_size >= MINSIZE)
	fREe(chunk2mem(old_top));
    }
  }

  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
    max_sbrked_mem = sbrked_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
}


/* Main public routines */


/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps that succeeds is taken:

      1. The bin corresponding to the request size is scanned, and if
	 a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
	 enough.  This is a form of (roving) first fit, used only in
	 the absence of exact fits. Runs of consecutive requests use
	 the remainder of the chunk used for the previous such request
	 whenever possible. This limited use of a first-fit style
	 allocation strategy tends to give contiguous chunks
	 coextensive lifetimes, which improves locality and can reduce
	 fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
	 chunk big enough to fulfill the request, and splitting off
	 any remainder.  This search is strictly by best-fit; i.e.,
	 the smallest (with ties going to approximately the least
	 recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
	 (`top') is split off. (This use of `top' is in accord with
	 the best-fit search rule.  In effect, `top' is treated as
	 larger (and thus less well fitting) than any other available
	 chunk since it can be extended to be as large as necessary
	 (up to system limitations).

      5. If the request size meets the mmap threshold and the
	 system supports mmap, and there are few enough currently
	 allocated mmapped regions, and a call to mmap succeeds,
	 the request is allocated via direct memory mapping.

      6. Otherwise, the top of memory is extended by
	 obtaining more space from the system (normally using sbrk,
	 but definable to anything else via the MORECORE macro).
	 Memory is gathered from the system (in system page-sized
	 units) in a way that allows chunks obtained across different
	 sbrk calls to be consolidated, but does not require
	 contiguous memory. Thus, it should be safe to intersperse
	 mallocs with other sbrk calls.


    All allocations are made from the `lowest' part of any found
    chunk. (The implementation invariant is that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use chunk,
    or the base of its memory arena.)

*/

#if __STD_C
Void_t* mALLOc(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
#endif
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
  if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
    return malloc_simple(bytes);
#endif

  if (CONFIG_IS_ENABLED(UNIT_TEST) && malloc_testing) {
    if (--malloc_max_allocs < 0)
      return NULL;
  }

  /* check if mem_malloc_init() was run */
  if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
    /* not initialized yet */
    return NULL;
  }

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size */

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
	--idx; /* adjust to rescan below after checking last remainder */
	break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
	unlink(victim, bck, fwd);
	set_inuse_bit_at_offset(victim, victim_size);
	check_malloced_chunk(victim, nb);
	VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks_r)
  {

    /* Get to the first marked block */

    if ( (block & binblocks_r) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks_r) == 0)
      {
	idx += BINBLOCKWIDTH;
	block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
	/* Find and use first big enough chunk ... */

	for (victim = last(bin); victim != bin; victim = victim->bk)
	{
	  victim_size = chunksize(victim);
	  remainder_size = victim_size - nb;

	  if (remainder_size >= (long)MINSIZE) /* split */
	  {
	    remainder = chunk_at_offset(victim, nb);
	    set_head(victim, nb | PREV_INUSE);
	    unlink(victim, bck, fwd);
	    link_last_remainder(remainder);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    set_foot(remainder, remainder_size);
	    check_malloced_chunk(victim, nb);
	    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	    return chunk2mem(victim);
	  }

	  else if (remainder_size >= 0)  /* take */
	  {
	    set_inuse_bit_at_offset(victim, victim_size);
	    unlink(victim, bck, fwd);
	    check_malloced_chunk(victim, nb);
	    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	    return chunk2mem(victim);
	  }

	}

	bin = next_bin(bin);

      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
	{
	  av_[1] = (mbinptr)(binblocks_r & ~block);
	  break;
	}
	--startidx;
	q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks_r && (block != 0) )
      {
	while ((block & binblocks_r) == 0)
	{
	  idx += BINBLOCKWIDTH;
	  block <<= 1;
	}
      }
      else
	break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists  */
  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
	(victim = mmap_chunk(nb))) {
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }
#endif

    /* Try to extend */
    malloc_extend_top(nb);
    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
  return chunk2mem(victim);

}


/*

  free() algorithm :

    cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via munmap().

       3. If a returned chunk borders the current high end of memory,
	  it is consolidated into the top, and if the total unused
	  topmost memory exceeds the trim threshold, malloc_trim is
	  called.

       4. Other chunks are consolidated as they arrive, and
	  placed in corresponding bins. (This includes the case of
	  consolidating with the current `last_remainder').

*/
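
/*
 * Consolidation example for case 4 (illustrative): if chunk B is freed
 * while its physical neighbours A (before) and C (after) are already
 * free, fREe() unlinks A and C from their bins, merges all three into
 * one chunk at A's address of size szA + szB + szC, and frontlinks the
 * result, preserving the invariant that no two free chunks border
 * each other.
 */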

#if __STD_C
void fREe(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
#endif
{
  mchunkptr p;         /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T sz;  /* its size */
  int       idx;       /* its bin index */
  mchunkptr next;      /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;       /* misc temp for linking */
  mchunkptr fwd;       /* misc temp for linking */
  int       islr;      /* track whether merging with last_remainder */

#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
  /* free() is a no-op - all the memory will be freed on relocation */
  if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
    VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);
    return;
  }
#endif

  if (mem == NULL)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);
  hd = p->size;

#if HAVE_MMAP
  if (hd & IS_MMAPPED)                          /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);
  VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);

  if (next == top)                            /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                    /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
      malloc_trim(top_pad);
    return;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }


  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);
}
1658
1659
Simon Glassd93041a2014-07-10 22:23:25 -06001660
wdenk217c9da2002-10-25 20:35:49 +00001661
1662
1663/*
1664
1665 Realloc algorithm:
1666
1667 Chunks that were obtained via mmap cannot be extended or shrunk
1668 unless HAVE_MREMAP is defined, in which case mremap is used.
1669 Otherwise, if their reallocation is for additional space, they are
1670 copied. If for less, they are just left alone.
1671
1672 Otherwise, if the reallocation is for additional space, and the
1673 chunk can be extended, it is, else a malloc-copy-free sequence is
1674 taken. There are several different ways that a chunk could be
1675 extended. All are tried:
1676
1677 * Extending forward into following adjacent free chunk.
1678 * Shifting backwards, joining preceding adjacent space
1679 * Both shifting backwards and extending forward.
1680 * Extending into newly sbrked space
1681
1682 Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
1683 size argument of zero (re)allocates a minimum-sized chunk.
1684
1685 If the reallocation is for less space, and the new request is for
1686 a `small' (<512 bytes) size, then the newly unused space is lopped
1687 off and freed.
1688
1689 The old unix realloc convention of allowing the last-free'd chunk
1690 to be used as an argument to realloc is no longer supported.
1691 I don't know of any programs still relying on this feature,
1692 and allowing it would also allow too many other incorrect
1693 usages of realloc to be sensible.
1694
1695
1696*/
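
/*
  Illustrative sketch (not part of the allocator): the caller-visible
  contract of the algorithm above, assuming the usual realloc() wrapper
  for rEALLOc(). A careful caller keeps the old pointer until the
  result is known to be non-NULL.
*/
#if 0 /* example only */
static void realloc_example(void)
{
	char *p = malloc(64);
	char *q;

	/* Growing may extend in place (forward, backward, or into top)
	   or fall back to malloc-copy-free; either way the original 64
	   bytes are preserved in the block that comes back. */
	q = realloc(p, 4096);
	if (q)
		p = q;

	/* Shrinking to a `small' size lops off and frees the tail. */
	q = realloc(p, 128);
	if (q)
		p = q;

	free(p);
}
#endif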
1697
1698
1699#if __STD_C
1700Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
1701#else
1702Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
1703#endif
1704{
1705 INTERNAL_SIZE_T nb; /* padded request size */
1706
1707 mchunkptr oldp; /* chunk corresponding to oldmem */
1708 INTERNAL_SIZE_T oldsize; /* its size */
1709
1710 mchunkptr newp; /* chunk to return */
1711 INTERNAL_SIZE_T newsize; /* its size */
1712 Void_t* newmem; /* corresponding user mem */
1713
1714 mchunkptr next; /* next contiguous chunk after oldp */
1715 INTERNAL_SIZE_T nextsize; /* its size */
1716
1717 mchunkptr prev; /* previous contiguous chunk before oldp */
1718 INTERNAL_SIZE_T prevsize; /* its size */
1719
1720 mchunkptr remainder; /* holds split off extra space from newp */
1721 INTERNAL_SIZE_T remainder_size; /* its size */
1722
1723 mchunkptr bck; /* misc temp for linking */
1724 mchunkptr fwd; /* misc temp for linking */
1725
1726#ifdef REALLOC_ZERO_BYTES_FREES
Heinrich Schuchardta874cac2017-11-10 21:46:34 +01001727 if (!bytes) {
1728 fREe(oldmem);
1729 return NULL;
1730 }
wdenk217c9da2002-10-25 20:35:49 +00001731#endif
1732
Kim Phillips199adb62012-10-29 13:34:32 +00001733 if ((long)bytes < 0) return NULL;
wdenk217c9da2002-10-25 20:35:49 +00001734
1735 /* realloc of null is supposed to be same as malloc */
Kim Phillips199adb62012-10-29 13:34:32 +00001736 if (oldmem == NULL) return mALLOc(bytes);
wdenk217c9da2002-10-25 20:35:49 +00001737
Simon Glass3d6d5072023-09-26 08:14:27 -06001738#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
Simon Glassc9356be2014-11-10 17:16:43 -07001739 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
Simon Glassd59476b2014-07-10 22:23:28 -06001740 /* This is harder to support and should not be needed */
1741 panic("pre-reloc realloc() is not supported");
1742 }
1743#endif
1744
wdenk217c9da2002-10-25 20:35:49 +00001745 newp = oldp = mem2chunk(oldmem);
1746 newsize = oldsize = chunksize(oldp);
1747
1748
1749 nb = request2size(bytes);
1750
1751#if HAVE_MMAP
1752 if (chunk_is_mmapped(oldp))
1753 {
1754#if HAVE_MREMAP
1755 newp = mremap_chunk(oldp, nb);
1756 if(newp) return chunk2mem(newp);
1757#endif
1758 /* Note the extra SIZE_SZ overhead. */
1759 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
1760 /* Must alloc, copy, free. */
1761 newmem = mALLOc(bytes);
Heinrich Schuchardta874cac2017-11-10 21:46:34 +01001762 if (!newmem)
1763 return NULL; /* propagate failure */
wdenk217c9da2002-10-25 20:35:49 +00001764 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
1765 munmap_chunk(oldp);
1766 return newmem;
1767 }
1768#endif
1769
1770 check_inuse_chunk(oldp);
1771
1772 if ((long)(oldsize) < (long)(nb))
1773 {
1774
1775 /* Try expanding forward */
1776
1777 next = chunk_at_offset(oldp, oldsize);
1778 if (next == top || !inuse(next))
1779 {
1780 nextsize = chunksize(next);
1781
1782 /* Forward into top only if a remainder */
1783 if (next == top)
1784 {
wdenk8bde7f72003-06-27 21:31:46 +00001785 if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
1786 {
1787 newsize += nextsize;
1788 top = chunk_at_offset(oldp, nb);
1789 set_head(top, (newsize - nb) | PREV_INUSE);
1790 set_head_size(oldp, nb);
Sean Andersonbdaeea12022-03-23 14:04:49 -04001791 VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
1792 VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
wdenk8bde7f72003-06-27 21:31:46 +00001793 return chunk2mem(oldp);
1794 }
wdenk217c9da2002-10-25 20:35:49 +00001795 }
1796
1797 /* Forward into next chunk */
1798 else if (((long)(nextsize + newsize) >= (long)(nb)))
1799 {
wdenk8bde7f72003-06-27 21:31:46 +00001800 unlink(next, bck, fwd);
1801 newsize += nextsize;
Sean Andersonbdaeea12022-03-23 14:04:49 -04001802 VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
1803 VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
wdenk8bde7f72003-06-27 21:31:46 +00001804 goto split;
wdenk217c9da2002-10-25 20:35:49 +00001805 }
1806 }
1807 else
1808 {
Kim Phillips199adb62012-10-29 13:34:32 +00001809 next = NULL;
wdenk217c9da2002-10-25 20:35:49 +00001810 nextsize = 0;
1811 }
1812
1813 /* Try shifting backwards. */
1814
1815 if (!prev_inuse(oldp))
1816 {
1817 prev = prev_chunk(oldp);
1818 prevsize = chunksize(prev);
1819
1820 /* try forward + backward first to save a later consolidation */
1821
Kim Phillips199adb62012-10-29 13:34:32 +00001822 if (next != NULL)
wdenk217c9da2002-10-25 20:35:49 +00001823 {
wdenk8bde7f72003-06-27 21:31:46 +00001824 /* into top */
1825 if (next == top)
1826 {
1827 if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
1828 {
1829 unlink(prev, bck, fwd);
1830 newp = prev;
1831 newsize += prevsize + nextsize;
1832 newmem = chunk2mem(newp);
Sean Andersonbdaeea12022-03-23 14:04:49 -04001833 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
wdenk8bde7f72003-06-27 21:31:46 +00001834 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1835 top = chunk_at_offset(newp, nb);
1836 set_head(top, (newsize - nb) | PREV_INUSE);
1837 set_head_size(newp, nb);
Sean Andersonbdaeea12022-03-23 14:04:49 -04001838 VALGRIND_FREELIKE_BLOCK(oldmem, SIZE_SZ);
wdenk8bde7f72003-06-27 21:31:46 +00001839 return newmem;
1840 }
1841 }
wdenk217c9da2002-10-25 20:35:49 +00001842
wdenk8bde7f72003-06-27 21:31:46 +00001843 /* into next chunk */
1844 else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
1845 {
1846 unlink(next, bck, fwd);
1847 unlink(prev, bck, fwd);
1848 newp = prev;
1849 newsize += nextsize + prevsize;
1850 newmem = chunk2mem(newp);
Sean Andersonbdaeea12022-03-23 14:04:49 -04001851 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
wdenk8bde7f72003-06-27 21:31:46 +00001852 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1853 goto split;
1854 }
wdenk217c9da2002-10-25 20:35:49 +00001855 }
1856
1857 /* backward only */
Kim Phillips199adb62012-10-29 13:34:32 +00001858 if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
wdenk217c9da2002-10-25 20:35:49 +00001859 {
wdenk8bde7f72003-06-27 21:31:46 +00001860 unlink(prev, bck, fwd);
1861 newp = prev;
1862 newsize += prevsize;
1863 newmem = chunk2mem(newp);
Sean Andersonbdaeea12022-03-23 14:04:49 -04001864 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
wdenk8bde7f72003-06-27 21:31:46 +00001865 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1866 goto split;
wdenk217c9da2002-10-25 20:35:49 +00001867 }
1868 }
1869
1870 /* Must allocate */
1871
1872 newmem = mALLOc (bytes);
1873
Kim Phillips199adb62012-10-29 13:34:32 +00001874 if (newmem == NULL) /* propagate failure */
1875 return NULL;
wdenk217c9da2002-10-25 20:35:49 +00001876
1877 /* Avoid copy if newp is next chunk after oldp. */
1878 /* (This can only happen when new chunk is sbrk'ed.) */
1879
1880 if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
1881 {
1882 newsize += chunksize(newp);
1883 newp = oldp;
1884 goto split;
1885 }
1886
1887 /* Otherwise copy, free, and exit */
1888 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1889 fREe(oldmem);
1890 return newmem;
Sean Andersonbdaeea12022-03-23 14:04:49 -04001891 } else {
1892 VALGRIND_RESIZEINPLACE_BLOCK(oldmem, 0, bytes, SIZE_SZ);
1893 VALGRIND_MAKE_MEM_DEFINED(oldmem, bytes);
wdenk217c9da2002-10-25 20:35:49 +00001894 }
1895
1896
1897 split: /* split off extra room in old or expanded chunk */
1898
1899 if (newsize - nb >= MINSIZE) /* split off remainder */
1900 {
1901 remainder = chunk_at_offset(newp, nb);
1902 remainder_size = newsize - nb;
1903 set_head_size(newp, nb);
1904 set_head(remainder, remainder_size | PREV_INUSE);
1905 set_inuse_bit_at_offset(remainder, remainder_size);
Sean Andersonbdaeea12022-03-23 14:04:49 -04001906 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
1907 false);
wdenk217c9da2002-10-25 20:35:49 +00001908 fREe(chunk2mem(remainder)); /* let free() deal with it */
1909 }
1910 else
1911 {
1912 set_head_size(newp, newsize);
1913 set_inuse_bit_at_offset(newp, newsize);
1914 }
1915
1916 check_inuse_chunk(newp);
1917 return chunk2mem(newp);
1918}
1919
1920
Simon Glassd93041a2014-07-10 22:23:25 -06001921
wdenk217c9da2002-10-25 20:35:49 +00001922
1923/*
1924
1925 memalign algorithm:
1926
1927 memalign requests more than enough space from malloc, finds a spot
1928 within that chunk that meets the alignment request, and then
1929 possibly frees the leading and trailing space.
1930
1931 The alignment argument must be a power of two. This property is not
1932 checked by memalign, so misuse may result in random runtime errors.
1933
1934 8-byte alignment is guaranteed by normal malloc calls, so don't
1935 bother calling memalign with an argument of 8 or less.
1936
1937 Overreliance on memalign is a sure way to fragment space.
1938
1939*/
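
/*
  Illustrative sketch (not part of the allocator): requesting a
  page-aligned buffer, assuming the usual memalign() wrapper for
  mEMALIGn(). The alignment argument must be a power of two.
*/
#if 0 /* example only */
static void memalign_example(void)
{
	void *p = memalign(4096, 100);

	if (p) {
		assert(((unsigned long)p & (4096 - 1)) == 0);
		free(p);	/* blocks from memalign are freed normally */
	}
}
#endif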
1940
1941
1942#if __STD_C
1943Void_t* mEMALIGn(size_t alignment, size_t bytes)
1944#else
1945Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
1946#endif
1947{
1948 INTERNAL_SIZE_T nb; /* padded request size */
1949 char* m; /* memory returned by malloc call */
1950 mchunkptr p; /* corresponding chunk */
1951 char* brk; /* alignment point within p */
1952 mchunkptr newp; /* chunk to return */
1953 INTERNAL_SIZE_T newsize; /* its size */
 1954  INTERNAL_SIZE_T leadsize;    /* leading space before alignment point */
1955 mchunkptr remainder; /* spare room at end to split off */
1956 long remainder_size; /* its size */
1957
Kim Phillips199adb62012-10-29 13:34:32 +00001958 if ((long)bytes < 0) return NULL;
wdenk217c9da2002-10-25 20:35:49 +00001959
Simon Glass3d6d5072023-09-26 08:14:27 -06001960#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
Ley Foon Tanee038c52018-05-18 18:03:12 +08001961 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
Andreas Dannenberg4c6be012019-03-27 13:17:26 -05001962 return memalign_simple(alignment, bytes);
Ley Foon Tanee038c52018-05-18 18:03:12 +08001963 }
1964#endif
1965
wdenk217c9da2002-10-25 20:35:49 +00001966  /* If less alignment is needed than we give anyway, just relay to malloc */
1967
1968 if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
1969
1970 /* Otherwise, ensure that it is at least a minimum chunk size */
1971
1972 if (alignment < MINSIZE) alignment = MINSIZE;
1973
1974 /* Call malloc with worst case padding to hit alignment. */
1975
1976 nb = request2size(bytes);
1977 m = (char*)(mALLOc(nb + alignment + MINSIZE));
1978
Stephen Warren4f144a42016-01-25 14:03:42 -07001979 /*
1980 * The attempt to over-allocate (with a size large enough to guarantee the
1981 * ability to find an aligned region within allocated memory) failed.
1982 *
1983 * Try again, this time only allocating exactly the size the user wants. If
1984 * the allocation now succeeds and just happens to be aligned, we can still
1985 * fulfill the user's request.
1986 */
1987 if (m == NULL) {
Stephen Warren034eda82016-04-25 15:55:42 -06001988 size_t extra, extra2;
Stephen Warren4f144a42016-01-25 14:03:42 -07001989 /*
1990 * Use bytes not nb, since mALLOc internally calls request2size too, and
1991 * each call increases the size to allocate, to account for the header.
1992 */
1993 m = (char*)(mALLOc(bytes));
1994 /* Aligned -> return it */
1995 if ((((unsigned long)(m)) % alignment) == 0)
1996 return m;
Stephen Warren034eda82016-04-25 15:55:42 -06001997 /*
1998 * Otherwise, try again, requesting enough extra space to be able to
1999 * acquire alignment.
2000 */
Stephen Warren4f144a42016-01-25 14:03:42 -07002001 fREe(m);
Stephen Warren034eda82016-04-25 15:55:42 -06002002 /* Add in extra bytes to match misalignment of unexpanded allocation */
2003 extra = alignment - (((unsigned long)(m)) % alignment);
2004 m = (char*)(mALLOc(bytes + extra));
2005 /*
2006 * m might not be the same as before. Validate that the previous value of
2007 * extra still works for the current value of m.
 2008     * If (!m), extra2 would compute as alignment, so the recheck is
 2008     * guarded by if (m) and a NULL m falls through to the check below.
2009 */
2010 if (m) {
2011 extra2 = alignment - (((unsigned long)(m)) % alignment);
2012 if (extra2 > extra) {
2013 fREe(m);
2014 m = NULL;
2015 }
2016 }
2017 /* Fall through to original NULL check and chunk splitting logic */
Stephen Warren4f144a42016-01-25 14:03:42 -07002018 }
2019
Kim Phillips199adb62012-10-29 13:34:32 +00002020 if (m == NULL) return NULL; /* propagate failure */
wdenk217c9da2002-10-25 20:35:49 +00002021
2022 p = mem2chunk(m);
2023
2024 if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
2025 {
2026#if HAVE_MMAP
2027 if(chunk_is_mmapped(p))
2028 return chunk2mem(p); /* nothing more to do */
2029#endif
2030 }
2031 else /* misaligned */
2032 {
2033 /*
2034 Find an aligned spot inside chunk.
2035 Since we need to give back leading space in a chunk of at
2036 least MINSIZE, if the first calculation places us at
2037 a spot with less than MINSIZE leader, we can move to the
2038 next aligned spot -- we've allocated enough total room so that
2039 this is always possible.
2040 */
2041
2042 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
2043 if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
2044
2045 newp = (mchunkptr)brk;
2046 leadsize = brk - (char*)(p);
2047 newsize = chunksize(p) - leadsize;
2048
2049#if HAVE_MMAP
2050 if(chunk_is_mmapped(p))
2051 {
2052 newp->prev_size = p->prev_size + leadsize;
2053 set_head(newp, newsize|IS_MMAPPED);
2054 return chunk2mem(newp);
2055 }
2056#endif
2057
2058 /* give back leader, use the rest */
2059
2060 set_head(newp, newsize | PREV_INUSE);
2061 set_inuse_bit_at_offset(newp, newsize);
2062 set_head_size(p, leadsize);
2063 fREe(chunk2mem(p));
2064 p = newp;
Sean Andersonbdaeea12022-03-23 14:04:49 -04002065 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(p), bytes, SIZE_SZ, false);
wdenk217c9da2002-10-25 20:35:49 +00002066
2067 assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
2068 }
2069
2070 /* Also give back spare room at the end */
2071
2072 remainder_size = chunksize(p) - nb;
2073
2074 if (remainder_size >= (long)MINSIZE)
2075 {
2076 remainder = chunk_at_offset(p, nb);
2077 set_head(remainder, remainder_size | PREV_INUSE);
2078 set_head_size(p, nb);
Sean Andersonbdaeea12022-03-23 14:04:49 -04002079 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
2080 false);
wdenk217c9da2002-10-25 20:35:49 +00002081 fREe(chunk2mem(remainder));
2082 }
2083
2084 check_inuse_chunk(p);
2085 return chunk2mem(p);
2086
2087}
2088
Simon Glassd93041a2014-07-10 22:23:25 -06002089
wdenk217c9da2002-10-25 20:35:49 +00002090
2091
2092/*
2093 valloc just invokes memalign with alignment argument equal
2094 to the page size of the system (or as near to this as can
2095 be figured out from all the includes/defines above.)
2096*/
2097
2098#if __STD_C
2099Void_t* vALLOc(size_t bytes)
2100#else
2101Void_t* vALLOc(bytes) size_t bytes;
2102#endif
2103{
2104 return mEMALIGn (malloc_getpagesize, bytes);
2105}
2106
2107/*
2108 pvalloc just invokes valloc for the nearest pagesize
 2109  that will accommodate the request
2110*/
2111
2112
2113#if __STD_C
2114Void_t* pvALLOc(size_t bytes)
2115#else
2116Void_t* pvALLOc(bytes) size_t bytes;
2117#endif
2118{
2119 size_t pagesize = malloc_getpagesize;
2120 return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
2121}
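
/*
  For example (a sketch of the rounding above): with malloc_getpagesize
  == 4096, pvALLOc(5000) rounds the request up to two pages, since
  (5000 + 4095) & ~4095 == 8192.
*/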
2122
2123/*
2124
2125 calloc calls malloc, then zeroes out the allocated chunk.
2126
2127*/
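
/*
  Illustrative sketch (not part of the allocator), assuming the usual
  calloc() wrapper for cALLOc(): the result is zero-filled. Note that,
  like the code below, this relies on n * elem_size not overflowing.
*/
#if 0 /* example only */
static void calloc_example(void)
{
	unsigned int *tab = calloc(16, sizeof(*tab));

	if (tab) {
		assert(tab[0] == 0 && tab[15] == 0);	/* zero-filled */
		free(tab);
	}
}
#endif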
2128
2129#if __STD_C
2130Void_t* cALLOc(size_t n, size_t elem_size)
2131#else
2132Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
2133#endif
2134{
2135 mchunkptr p;
2136 INTERNAL_SIZE_T csz;
2137
2138 INTERNAL_SIZE_T sz = n * elem_size;
2139
2140
2141 /* check if expand_top called, in which case don't need to clear */
Shengyu Quc9db9a22023-08-25 00:25:19 +08002142#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
wdenk217c9da2002-10-25 20:35:49 +00002143#if MORECORE_CLEARS
2144 mchunkptr oldtop = top;
2145 INTERNAL_SIZE_T oldtopsize = chunksize(top);
2146#endif
Przemyslaw Marczak0aa8a4a2015-03-04 14:01:24 +01002147#endif
wdenk217c9da2002-10-25 20:35:49 +00002148 Void_t* mem = mALLOc (sz);
2149
Kim Phillips199adb62012-10-29 13:34:32 +00002150 if ((long)n < 0) return NULL;
wdenk217c9da2002-10-25 20:35:49 +00002151
Kim Phillips199adb62012-10-29 13:34:32 +00002152 if (mem == NULL)
2153 return NULL;
wdenk217c9da2002-10-25 20:35:49 +00002154 else
2155 {
Simon Glass3d6d5072023-09-26 08:14:27 -06002156#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
Simon Glassc9356be2014-11-10 17:16:43 -07002157 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
Simon Goldschmidtbb71a2d2019-10-25 21:23:35 +02002158 memset(mem, 0, sz);
Simon Glassd59476b2014-07-10 22:23:28 -06002159 return mem;
2160 }
2161#endif
wdenk217c9da2002-10-25 20:35:49 +00002162 p = mem2chunk(mem);
2163
2164 /* Two optional cases in which clearing not necessary */
2165
2166
2167#if HAVE_MMAP
2168 if (chunk_is_mmapped(p)) return mem;
2169#endif
2170
2171 csz = chunksize(p);
2172
Shengyu Quc9db9a22023-08-25 00:25:19 +08002173#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
wdenk217c9da2002-10-25 20:35:49 +00002174#if MORECORE_CLEARS
2175 if (p == oldtop && csz > oldtopsize)
2176 {
2177 /* clear only the bytes from non-freshly-sbrked memory */
2178 csz = oldtopsize;
2179 }
2180#endif
Przemyslaw Marczak0aa8a4a2015-03-04 14:01:24 +01002181#endif
wdenk217c9da2002-10-25 20:35:49 +00002182
2183 MALLOC_ZERO(mem, csz - SIZE_SZ);
Sean Andersonbdaeea12022-03-23 14:04:49 -04002184 VALGRIND_MAKE_MEM_DEFINED(mem, sz);
wdenk217c9da2002-10-25 20:35:49 +00002185 return mem;
2186 }
2187}
2188
2189/*
2190
2191 cfree just calls free. It is needed/defined on some systems
2192 that pair it with calloc, presumably for odd historical reasons.
2193
2194*/
2195
2196#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
2197#if __STD_C
2198void cfree(Void_t *mem)
2199#else
2200void cfree(mem) Void_t *mem;
2201#endif
2202{
2203 fREe(mem);
2204}
2205#endif
2206
Simon Glassd93041a2014-07-10 22:23:25 -06002207
wdenk217c9da2002-10-25 20:35:49 +00002208
2209/*
2210
2211 Malloc_trim gives memory back to the system (via negative
2212 arguments to sbrk) if there is unused memory at the `high' end of
2213 the malloc pool. You can call this after freeing large blocks of
2214 memory to potentially reduce the system-level memory requirements
2215 of a program. However, it cannot guarantee to reduce memory. Under
2216 some allocation patterns, some large free blocks of memory will be
2217 locked between two used chunks, so they cannot be given back to
2218 the system.
2219
2220 The `pad' argument to malloc_trim represents the amount of free
2221 trailing space to leave untrimmed. If this argument is zero,
2222 only the minimum amount of memory to maintain internal data
2223 structures will be left (one page or less). Non-zero arguments
2224 can be supplied to maintain enough trailing space to service
2225 future expected allocations without having to re-obtain memory
2226 from the system.
2227
2228 Malloc_trim returns 1 if it actually released any memory, else 0.
2229
2230*/
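
/*
  Illustrative sketch (not part of the allocator): trying to return
  memory to the system after a large transient allocation. Whether any
  memory is actually released depends on the freed block ending up at
  the top of the pool.
*/
#if 0 /* example only */
static void trim_example(void)
{
	void *big = malloc(1024 * 1024);
	int released;

	free(big);			/* likely merges into top */
	released = malloc_trim(0);	/* 1 if memory went back to the
					   system, else 0 */
	(void)released;
}
#endif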
2231
2232#if __STD_C
2233int malloc_trim(size_t pad)
2234#else
2235int malloc_trim(pad) size_t pad;
2236#endif
2237{
2238 long top_size; /* Amount of top-most memory */
2239 long extra; /* Amount to release */
2240 char* current_brk; /* address returned by pre-check sbrk call */
2241 char* new_brk; /* address returned by negative sbrk call */
2242
2243 unsigned long pagesz = malloc_getpagesize;
2244
2245 top_size = chunksize(top);
2246 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
2247
2248 if (extra < (long)pagesz) /* Not enough memory to release */
2249 return 0;
2250
2251 else
2252 {
2253 /* Test to make sure no one else called sbrk */
2254 current_brk = (char*)(MORECORE (0));
2255 if (current_brk != (char*)(top) + top_size)
2256 return 0; /* Apparently we don't own memory; must fail */
2257
2258 else
2259 {
2260 new_brk = (char*)(MORECORE (-extra));
2261
2262 if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
2263 {
wdenk8bde7f72003-06-27 21:31:46 +00002264 /* Try to figure out what we have */
2265 current_brk = (char*)(MORECORE (0));
2266 top_size = current_brk - (char*)top;
2267 if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
2268 {
2269 sbrked_mem = current_brk - sbrk_base;
2270 set_head(top, top_size | PREV_INUSE);
2271 }
2272 check_chunk(top);
2273 return 0;
wdenk217c9da2002-10-25 20:35:49 +00002274 }
2275
2276 else
2277 {
wdenk8bde7f72003-06-27 21:31:46 +00002278 /* Success. Adjust top accordingly. */
2279 set_head(top, (top_size - extra) | PREV_INUSE);
2280 sbrked_mem -= extra;
2281 check_chunk(top);
2282 return 1;
wdenk217c9da2002-10-25 20:35:49 +00002283 }
2284 }
2285 }
2286}
2287
Simon Glassd93041a2014-07-10 22:23:25 -06002288
wdenk217c9da2002-10-25 20:35:49 +00002289
2290/*
2291 malloc_usable_size:
2292
2293 This routine tells you how many bytes you can actually use in an
2294 allocated chunk, which may be more than you requested (although
2295 often not). You can use this many bytes without worrying about
2296 overwriting other allocated objects. Not a particularly great
2297 programming practice, but still sometimes useful.
2298
2299*/
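
/*
  Illustrative sketch (not part of the allocator): every byte reported
  by malloc_usable_size() may be written without touching a neighbouring
  object. Assumes the usual malloc()/free() wrappers.
*/
#if 0 /* example only */
static void usable_size_example(void)
{
	char *p = malloc(100);

	if (p) {
		size_t n = malloc_usable_size(p);

		assert(n >= 100);	/* may be more than requested */
		memset(p, 0xff, n);	/* safe: stays inside the chunk */
		free(p);
	}
}
#endif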
2300
2301#if __STD_C
2302size_t malloc_usable_size(Void_t* mem)
2303#else
2304size_t malloc_usable_size(mem) Void_t* mem;
2305#endif
2306{
2307 mchunkptr p;
Kim Phillips199adb62012-10-29 13:34:32 +00002308 if (mem == NULL)
wdenk217c9da2002-10-25 20:35:49 +00002309 return 0;
2310 else
2311 {
2312 p = mem2chunk(mem);
2313 if(!chunk_is_mmapped(p))
2314 {
2315 if (!inuse(p)) return 0;
2316 check_inuse_chunk(p);
2317 return chunksize(p) - SIZE_SZ;
2318 }
2319 return chunksize(p) - 2*SIZE_SZ;
2320 }
2321}
2322
2323
Simon Glassd93041a2014-07-10 22:23:25 -06002324
wdenk217c9da2002-10-25 20:35:49 +00002325
2326/* Utility to update current_mallinfo for malloc_stats and mallinfo() */
2327
Wolfgang Denkea882ba2010-06-20 23:33:59 +02002328#ifdef DEBUG
Tom Rinif88d48c2023-02-27 17:08:34 -05002329static void malloc_update_mallinfo(void)
wdenk217c9da2002-10-25 20:35:49 +00002330{
2331 int i;
2332 mbinptr b;
2333 mchunkptr p;
2334#ifdef DEBUG
2335 mchunkptr q;
2336#endif
2337
2338 INTERNAL_SIZE_T avail = chunksize(top);
2339 int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
2340
2341 for (i = 1; i < NAV; ++i)
2342 {
2343 b = bin_at(i);
2344 for (p = last(b); p != b; p = p->bk)
2345 {
2346#ifdef DEBUG
2347 check_free_chunk(p);
2348 for (q = next_chunk(p);
wdenk8bde7f72003-06-27 21:31:46 +00002349 q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
2350 q = next_chunk(q))
2351 check_inuse_chunk(q);
wdenk217c9da2002-10-25 20:35:49 +00002352#endif
2353 avail += chunksize(p);
2354 navail++;
2355 }
2356 }
2357
2358 current_mallinfo.ordblks = navail;
2359 current_mallinfo.uordblks = sbrked_mem - avail;
2360 current_mallinfo.fordblks = avail;
2361 current_mallinfo.hblks = n_mmaps;
2362 current_mallinfo.hblkhd = mmapped_mem;
2363 current_mallinfo.keepcost = chunksize(top);
2364
2365}
Wolfgang Denkea882ba2010-06-20 23:33:59 +02002366#endif /* DEBUG */
wdenk217c9da2002-10-25 20:35:49 +00002367
Simon Glassd93041a2014-07-10 22:23:25 -06002368
wdenk217c9da2002-10-25 20:35:49 +00002369
2370/*
2371
2372 malloc_stats:
2373
 2374  Prints the amount of space obtained from the system (both
2375 via sbrk and mmap), the maximum amount (which may be more than
2376 current if malloc_trim and/or munmap got called), the maximum
2377 number of simultaneous mmap regions used, and the current number
2378 of bytes allocated via malloc (or realloc, etc) but not yet
2379 freed. (Note that this is the number of bytes allocated, not the
2380 number requested. It will be larger than the number requested
2381 because of alignment and bookkeeping overhead.)
2382
2383*/
2384
Wolfgang Denkea882ba2010-06-20 23:33:59 +02002385#ifdef DEBUG
Tom Rinif88d48c2023-02-27 17:08:34 -05002386void malloc_stats(void)
wdenk217c9da2002-10-25 20:35:49 +00002387{
2388 malloc_update_mallinfo();
2389 printf("max system bytes = %10u\n",
wdenk8bde7f72003-06-27 21:31:46 +00002390 (unsigned int)(max_total_mem));
wdenk217c9da2002-10-25 20:35:49 +00002391 printf("system bytes = %10u\n",
wdenk8bde7f72003-06-27 21:31:46 +00002392 (unsigned int)(sbrked_mem + mmapped_mem));
wdenk217c9da2002-10-25 20:35:49 +00002393 printf("in use bytes = %10u\n",
wdenk8bde7f72003-06-27 21:31:46 +00002394 (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
wdenk217c9da2002-10-25 20:35:49 +00002395#if HAVE_MMAP
2396 printf("max mmap regions = %10u\n",
wdenk8bde7f72003-06-27 21:31:46 +00002397 (unsigned int)max_n_mmaps);
wdenk217c9da2002-10-25 20:35:49 +00002398#endif
2399}
Wolfgang Denkea882ba2010-06-20 23:33:59 +02002400#endif /* DEBUG */
wdenk217c9da2002-10-25 20:35:49 +00002401
2402/*
2403 mallinfo returns a copy of updated current mallinfo.
2404*/
2405
Wolfgang Denkea882ba2010-06-20 23:33:59 +02002406#ifdef DEBUG
Tom Rinif88d48c2023-02-27 17:08:34 -05002407struct mallinfo mALLINFo(void)
wdenk217c9da2002-10-25 20:35:49 +00002408{
2409 malloc_update_mallinfo();
2410 return current_mallinfo;
2411}
Wolfgang Denkea882ba2010-06-20 23:33:59 +02002412#endif /* DEBUG */
wdenk217c9da2002-10-25 20:35:49 +00002413
2414
Simon Glassd93041a2014-07-10 22:23:25 -06002415
wdenk217c9da2002-10-25 20:35:49 +00002416
2417/*
2418 mallopt:
2419
2420 mallopt is the general SVID/XPG interface to tunable parameters.
2421 The format is to provide a (parameter-number, parameter-value) pair.
2422 mallopt then sets the corresponding parameter to the argument
2423 value if it can (i.e., so long as the value is meaningful),
2424 and returns 1 if successful else 0.
2425
2426 See descriptions of tunable parameters above.
2427
2428*/
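
/*
  Illustrative sketch (not part of the allocator): raising the trim
  threshold so that freeing large blocks does not keep shrinking the
  sbrk'ed region. mALLOPt() returns 1 on success and 0 for an unknown
  or unusable parameter.
*/
#if 0 /* example only */
static void mallopt_example(void)
{
	int ok = mALLOPt(M_TRIM_THRESHOLD, 1024 * 1024);

	(void)ok;	/* 1: threshold updated */
}
#endif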
2429
2430#if __STD_C
2431int mALLOPt(int param_number, int value)
2432#else
2433int mALLOPt(param_number, value) int param_number; int value;
2434#endif
2435{
2436 switch(param_number)
2437 {
2438 case M_TRIM_THRESHOLD:
2439 trim_threshold = value; return 1;
2440 case M_TOP_PAD:
2441 top_pad = value; return 1;
2442 case M_MMAP_THRESHOLD:
2443 mmap_threshold = value; return 1;
2444 case M_MMAP_MAX:
2445#if HAVE_MMAP
2446 n_mmaps_max = value; return 1;
2447#else
2448 if (value != 0) return 0; else n_mmaps_max = value; return 1;
2449#endif
2450
2451 default:
2452 return 0;
2453 }
2454}
2455
Simon Glassfb5cf7f2015-02-27 22:06:36 -07002456int initf_malloc(void)
2457{
Simon Glass3d6d5072023-09-26 08:14:27 -06002458#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
Simon Glassfb5cf7f2015-02-27 22:06:36 -07002459 assert(gd->malloc_base); /* Set up by crt0.S */
Andy Yanf1896c42017-07-24 17:43:34 +08002460 gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
Simon Glassfb5cf7f2015-02-27 22:06:36 -07002461 gd->malloc_ptr = 0;
2462#endif
2463
2464 return 0;