// SPDX-License-Identifier: GPL-2.0+

/*
 * Crossported from the identically named file of btrfs-progs.
 *
 * Minor modifications were made to the included headers.
 */
#ifndef __BTRFS_EXTENT_CACHE_H__
#define __BTRFS_EXTENT_CACHE_H__

#include <linux/rbtree.h>
#include <linux/types.h>

struct cache_tree {
	struct rb_root root;
};

struct cache_extent {
	struct rb_node rb_node;
	u64 objectid;
	u64 start;
	u64 size;
};
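
/*
 * Illustrative sketch (not part of the original header): callers typically
 * embed a cache_extent in their own record and recover the record with
 * container_of(). The struct and variable names below are hypothetical.
 *
 *	struct my_block_record {
 *		struct cache_extent cache;	// keyed by cache.start/cache.size
 *		int refs;
 *	};
 *
 *	struct cache_extent *ce = lookup_cache_extent(tree, bytenr, len);
 *
 *	if (ce) {
 *		struct my_block_record *rec =
 *			container_of(ce, struct my_block_record, cache);
 *		rec->refs++;
 *	}
 */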

void cache_tree_init(struct cache_tree *tree);

struct cache_extent *first_cache_extent(struct cache_tree *tree);
struct cache_extent *last_cache_extent(struct cache_tree *tree);
struct cache_extent *prev_cache_extent(struct cache_extent *pe);
struct cache_extent *next_cache_extent(struct cache_extent *pe);
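
/*
 * Illustrative sketch (assumption, not from the original source): an in-order
 * walk of a cache_tree using the iterator helpers above. handle_extent() is a
 * hypothetical per-extent callback.
 *
 *	struct cache_extent *ce;
 *
 *	for (ce = first_cache_extent(tree); ce; ce = next_cache_extent(ce))
 *		handle_extent(ce);
 */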

/*
 * Find a cache_extent which covers start.
 *
 * If not found, return the next cache_extent, if any.
 */
struct cache_extent *search_cache_extent(struct cache_tree *tree, u64 start);

/*
 * Find a cache_extent which strictly covers the range [start, start + size).
 *
 * If not found, return NULL.
 */
struct cache_extent *lookup_cache_extent(struct cache_tree *tree,
					 u64 start, u64 size);

/*
 * Add a non-overlapping extent to the cache tree.
 *
 * If [start, start + size) overlaps with an existing extent, return -EEXIST.
 */
int add_cache_extent(struct cache_tree *tree, u64 start, u64 size);
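
/*
 * Illustrative sketch (assumption): adding a range and resolving the conflict
 * reported by add_cache_extent(). bytenr and len are hypothetical variables.
 *
 *	int ret = add_cache_extent(tree, bytenr, len);
 *
 *	if (ret == -EEXIST) {
 *		// An already tracked extent overlaps; find it and inspect it.
 *		struct cache_extent *ce = search_cache_extent(tree, bytenr);
 *		...
 *	}
 */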

/*
 * Same as add_cache_extent(), but takes a caller-provided cache_extent struct.
 */
int insert_cache_extent(struct cache_tree *tree, struct cache_extent *pe);
void remove_cache_extent(struct cache_tree *tree, struct cache_extent *pe);

static inline int cache_tree_empty(struct cache_tree *tree)
{
	return RB_EMPTY_ROOT(&tree->root);
}

typedef void (*free_cache_extent)(struct cache_extent *pe);

void cache_tree_free_extents(struct cache_tree *tree,
			     free_cache_extent free_func);

#define FREE_EXTENT_CACHE_BASED_TREE(name, free_func) \
static void free_##name##_tree(struct cache_tree *tree) \
{ \
	cache_tree_free_extents(tree, free_func); \
}
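
/*
 * Illustrative sketch (assumption): the macro above stamps out a typed free
 * helper from a per-extent destructor. Names here are hypothetical.
 *
 *	static void free_my_record(struct cache_extent *pe)
 *	{
 *		free(container_of(pe, struct my_block_record, cache));
 *	}
 *	FREE_EXTENT_CACHE_BASED_TREE(my_record, free_my_record)
 *
 * The macro expands to free_my_record_tree(), which walks the tree and calls
 * free_my_record() on every cache_extent.
 */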

void free_extent_cache_tree(struct cache_tree *tree);

/*
 * Search for a cache_extent with the same objectid which covers start.
 *
 * If not found, return the next cache_extent, if any.
 */
struct cache_extent *search_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start);
/*
 * Search for a cache_extent with the same objectid which covers the range
 * [start, start + size).
 *
 * If not found, return the next cache_extent, if any.
 */
struct cache_extent *lookup_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start, u64 size);
int insert_cache_extent2(struct cache_tree *tree, struct cache_extent *pe);
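
/*
 * Illustrative sketch (assumption): the *2 variants above key extents by
 * (objectid, start) rather than by start alone, e.g. one tree tracking ranges
 * of several inodes. ino, file_offset and len are hypothetical variables.
 *
 *	struct cache_extent *ce =
 *		lookup_cache_extent2(tree, ino, file_offset, len);
 */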

/*
 * Insert a cache_extent range [start, start + size).
 *
 * This function may merge the new range with an existing cache_extent.
 * NOTE: the caller must ensure the inserted range does not overlap any
 * existing range.
 */
int add_merge_cache_extent(struct cache_tree *tree, u64 start, u64 size);
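
/*
 * Illustrative sketch (assumption based on the comment above): building a
 * contiguous coverage map, where touching ranges collapse into one extent.
 *
 *	add_merge_cache_extent(tree, 0, 4096);		// tracks [0, 4096)
 *	add_merge_cache_extent(tree, 4096, 4096);	// merged into [0, 8192)
 *	add_merge_cache_extent(tree, 16384, 4096);	// separate [16384, 20480)
 */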

#endif /* __BTRFS_EXTENT_CACHE_H__ */