blob: c83426f59dc208fd51f53d1c3f447c52e955602b [file] [log] [blame]
Heiko Schocher0c06db52014-06-24 10:10:03 +02001
2#include <common.h>
Simon Glass336d4612020-02-03 07:36:16 -07003#include <malloc.h>
Masahiro Yamadae3332e12018-08-24 19:30:15 +09004#include <memalign.h>
Simon Glass90526e92020-05-10 11:39:56 -06005#include <asm/cache.h>
Heiko Schocher0c06db52014-06-24 10:10:03 +02006#include <linux/compat.h>
7
/*
 * Minimal stand-in for the Linux kernel's `current` task pointer.
 * Code ported from Linux can dereference `current->pid`; here it always
 * refers to this single static instance with PID 1.
 */
struct p_current cur = {
	.pid = 1,
};
__maybe_unused struct p_current *current = &cur;
12
/**
 * copy_from_user() - compatibility stand-in for the kernel helper
 *
 * There is no user/kernel address-space split here, so this is a plain
 * memcpy(). The redundant casts in the original were dropped; in
 * particular, casting @src to (void *) needlessly stripped its const
 * qualifier.
 *
 * @dest:	destination buffer
 * @src:	source buffer
 * @count:	number of bytes to copy
 * Return: number of bytes that could not be copied (always 0 here)
 */
unsigned long copy_from_user(void *dest, const void *src,
			     unsigned long count)
{
	memcpy(dest, src, count);
	return 0;
}
19
20void *kmalloc(size_t size, int flags)
21{
Masahiro Yamada6b9f9ea2015-07-13 13:17:07 +090022 void *p;
Heiko Schocher0c06db52014-06-24 10:10:03 +020023
Masahiro Yamadae3332e12018-08-24 19:30:15 +090024 p = malloc_cache_aligned(size);
Marek Szyprowski2ad98ab2019-10-02 14:37:20 +020025 if (p && flags & __GFP_ZERO)
Masahiro Yamada6b9f9ea2015-07-13 13:17:07 +090026 memset(p, 0, size);
27
28 return p;
Heiko Schocher0c06db52014-06-24 10:10:03 +020029}
30
Heiko Schocher0c06db52014-06-24 10:10:03 +020031struct kmem_cache *get_mem(int element_sz)
32{
33 struct kmem_cache *ret;
34
35 ret = memalign(ARCH_DMA_MINALIGN, sizeof(struct kmem_cache));
36 ret->sz = element_sz;
37
38 return ret;
39}
40
41void *kmem_cache_alloc(struct kmem_cache *obj, int flag)
42{
Masahiro Yamadae3332e12018-08-24 19:30:15 +090043 return malloc_cache_aligned(obj->sz);
Heiko Schocher0c06db52014-06-24 10:10:03 +020044}
AKASHI Takahiro4839e862019-11-13 09:44:47 +090045
46/**
47 * kmemdup - duplicate region of memory
48 *
49 * @src: memory region to duplicate
50 * @len: memory region length
51 * @gfp: GFP mask to use
52 *
53 * Return: newly allocated copy of @src or %NULL in case of error
54 */
55void *kmemdup(const void *src, size_t len, gfp_t gfp)
56{
57 void *p;
58
59 p = kmalloc(len, gfp);
60 if (p)
61 memcpy(p, src, len);
62 return p;
63}