/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_LMB_H
#define _LINUX_LMB_H
#ifdef __KERNEL__

#include <asm/types.h>
#include <asm/u-boot.h>

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#define MAX_LMB_REGIONS 8

struct lmb_property {
	phys_addr_t base;
	phys_size_t size;
};

struct lmb_region {
	unsigned long cnt;
	phys_size_t size;
	struct lmb_property region[MAX_LMB_REGIONS+1];
};

struct lmb {
	struct lmb_region memory;
	struct lmb_region reserved;
};

/* Reset @lmb so that both the memory and reserved region lists are empty */
extern void lmb_init(struct lmb *lmb);
/*
 * Initialise @lmb from the DRAM banks described by @bd and apply the
 * standard arch/board/FDT reservations
 */
extern void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd,
				 void *fdt_blob);
/* As above, but for a single memory range of @size bytes at @base */
extern void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				       phys_size_t size, void *fdt_blob);
/* Add a region to the list of available memory */
extern long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size);
/* Mark a region as reserved (in use) */
extern long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size);
/* Allocate @size bytes with @align alignment; returns 0 on failure */
extern phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align);
/* As lmb_alloc(), but the allocation will not be placed above @max_addr */
extern phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
				  phys_addr_t max_addr);
/* As lmb_alloc_base(), but no error message is printed on failure */
extern phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
				    phys_addr_t max_addr);
/* Try to reserve exactly @size bytes at @base; returns @base or 0 on failure */
extern phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base,
				  phys_size_t size);
/* Return how much free memory is available starting at @addr (0 if none) */
extern phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr);
/* Return non-zero if @addr lies within a reserved region */
extern int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr);
/* Release a previously reserved region */
extern long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size);

/* lmb_dump_all() dumps the region lists in debug builds only */
extern void lmb_dump_all(struct lmb *lmb);
/* lmb_dump_all_force() dumps them unconditionally */
extern void lmb_dump_all_force(struct lmb *lmb);
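
/*
 * Illustrative usage sketch of the API above. The variable names, addresses
 * and sizes here are hypothetical, not taken from this header:
 *
 *	struct lmb lmb;
 *	phys_addr_t buf;
 *
 *	lmb_init(&lmb);
 *	lmb_add(&lmb, 0x80000000, 0x40000000);		(register 1 GiB of RAM)
 *	lmb_reserve(&lmb, 0x80000000, 0x00100000);	(keep the first 1 MiB out)
 *	buf = lmb_alloc(&lmb, 0x00200000, 0x1000);	(2 MiB, 4 KiB aligned)
 *	if (buf)
 *		... use [buf, buf + 0x00200000) ...
 *	lmb_free(&lmb, buf, 0x00200000);
 */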

static inline phys_size_t
lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
{
	return type->region[region_nr].size;
}
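
/*
 * Hypothetical sketch of iterating the recorded memory banks with the helper
 * above ("i" and "total" are local variables assumed for illustration):
 *
 *	for (i = 0; i < lmb.memory.cnt; i++)
 *		total += lmb_size_bytes(&lmb.memory, i);
 */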

/* Hooks for boards and architectures to add their own reservations */
void board_lmb_reserve(struct lmb *lmb);
void arch_lmb_reserve(struct lmb *lmb);
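
/*
 * A minimal sketch of a board-side implementation of the hook above,
 * assuming a board that must keep a (hypothetical) shared buffer away from
 * the allocator; real boards reserve whatever regions they need:
 *
 *	void board_lmb_reserve(struct lmb *lmb)
 *	{
 *		lmb_reserve(lmb, 0x9f000000, 0x00100000);
 *	}
 */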

#endif /* __KERNEL__ */

#endif /* _LINUX_LMB_H */