#include <common.h>
#include <malloc.h>
#include <memalign.h>
#include <asm/cache.h>
#include <linux/compat.h>

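/*
 * U-Boot runs single-threaded, so a single static task structure
 * stands in for the kernel's per-thread "current" pointer.
 */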
struct p_current cur = {
	.pid = 1,
};
__maybe_unused struct p_current *current = &cur;

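/**
 * copy_from_user - copy data from "user" memory
 *
 * @dest: destination buffer
 * @src: source buffer
 * @count: number of bytes to copy
 *
 * U-Boot has no separate user address space, so this reduces to a
 * plain memcpy().
 *
 * Return: 0 (the number of bytes that could not be copied)
 */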
unsigned long copy_from_user(void *dest, const void *src,
			     unsigned long count)
{
	memcpy(dest, src, count);
	return 0;
}

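/**
 * kmalloc - allocate cache-aligned memory
 *
 * @size: number of bytes to allocate
 * @flags: GFP flags; only %__GFP_ZERO is honoured, all others are ignored
 *
 * Return: pointer to a cache-aligned buffer, or %NULL on failure
 */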
void *kmalloc(size_t size, int flags)
{
	void *p;

	p = malloc_cache_aligned(size);
	if (p && (flags & __GFP_ZERO))
		memset(p, 0, size);

	return p;
}

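/**
 * get_mem - create a minimal kmem_cache descriptor
 *
 * @element_sz: size in bytes of each object allocated from the cache
 *
 * There is no slab behind the descriptor; it only records the element
 * size for later kmem_cache_alloc() calls.
 *
 * Return: newly allocated cache descriptor, or %NULL on failure
 */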
struct kmem_cache *get_mem(int element_sz)
{
	struct kmem_cache *ret;

	ret = memalign(ARCH_DMA_MINALIGN, sizeof(struct kmem_cache));
	if (!ret)
		return NULL;
	ret->sz = element_sz;

	return ret;
}

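/**
 * kmem_cache_alloc - allocate one object from a cache
 *
 * @obj: cache descriptor created by get_mem()
 * @flag: GFP flags, ignored here
 *
 * Return: cache-aligned buffer of @obj->sz bytes, or %NULL on failure
 */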
void *kmem_cache_alloc(struct kmem_cache *obj, int flag)
{
	return malloc_cache_aligned(obj->sz);
}

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
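
/*
 * Usage sketch (hypothetical caller, not part of this file): code ported
 * from Linux can call these shims unchanged, e.g.
 *
 *	struct kmem_cache *cache = get_mem(sizeof(struct some_obj));
 *	struct some_obj *obj = kmem_cache_alloc(cache, 0);
 *	void *copy = kmemdup(src_buf, src_len, GFP_KERNEL);
 *
 * Here "struct some_obj", "src_buf" and "src_len" are placeholders, and
 * GFP_KERNEL is assumed to be provided by <linux/compat.h>.
 */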