// SPDX-License-Identifier: GPL-2.0+

/*
 * Cross-ported from the file of the same name in btrfs-progs.
 *
 * Minor modifications to the included headers.
 */
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <stdlib.h>
#include "extent-cache.h"
#include "common/rbtree-utils.h"

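/*
 * Search key for the comparators below: describes the range
 * [start, start + size), optionally qualified by an objectid.
 */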
struct cache_extent_search_range {
	u64 objectid;
	u64 start;
	u64 size;
};

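/*
 * Compare the extent behind @node against a cache_extent_search_range.
 *
 * Returns 1 if the extent ends at or before the range, -1 if the range ends
 * at or before the extent, and 0 if the two overlap.
 */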
static int cache_tree_comp_range(struct rb_node *node, void *data)
{
	struct cache_extent *entry;
	struct cache_extent_search_range *range;

	range = (struct cache_extent_search_range *)data;
	entry = rb_entry(node, struct cache_extent, rb_node);

	if (entry->start + entry->size <= range->start)
		return 1;
	else if (range->start + range->size <= entry->start)
		return -1;
	else
		return 0;
}

static int cache_tree_comp_nodes(struct rb_node *node1, struct rb_node *node2)
{
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	entry = rb_entry(node2, struct cache_extent, rb_node);
	range.start = entry->start;
	range.size = entry->size;

	return cache_tree_comp_range(node1, (void *)&range);
}

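/*
 * Same as cache_tree_comp_range(), except that extents are ordered by
 * objectid first and only compared by range when the objectids match.
 */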
static int cache_tree_comp_range2(struct rb_node *node, void *data)
{
	struct cache_extent *entry;
	struct cache_extent_search_range *range;

	range = (struct cache_extent_search_range *)data;
	entry = rb_entry(node, struct cache_extent, rb_node);

	if (entry->objectid < range->objectid)
		return 1;
	else if (entry->objectid > range->objectid)
		return -1;
	else if (entry->start + entry->size <= range->start)
		return 1;
	else if (range->start + range->size <= entry->start)
		return -1;
	else
		return 0;
}

static int cache_tree_comp_nodes2(struct rb_node *node1, struct rb_node *node2)
{
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	entry = rb_entry(node2, struct cache_extent, rb_node);
	range.objectid = entry->objectid;
	range.start = entry->start;
	range.size = entry->size;

	return cache_tree_comp_range2(node1, (void *)&range);
}

void cache_tree_init(struct cache_tree *tree)
{
	tree->root = RB_ROOT;
}

static struct cache_extent *alloc_cache_extent(u64 start, u64 size)
{
	struct cache_extent *pe = malloc(sizeof(*pe));

	if (!pe)
		return pe;

	pe->objectid = 0;
	pe->start = start;
	pe->size = size;
	return pe;
}

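/*
 * Allocate a cache_extent covering [start, start + size) and insert it into
 * @tree. Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from insert_cache_extent(), in which case the new extent is freed again.
 */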
int add_cache_extent(struct cache_tree *tree, u64 start, u64 size)
{
	struct cache_extent *pe = alloc_cache_extent(start, size);
	int ret;

	if (!pe)
		return -ENOMEM;

	ret = insert_cache_extent(tree, pe);
	if (ret)
		free(pe);

	return ret;
}

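/*
 * Insert a caller-allocated cache_extent into @tree; rb_insert() reports an
 * error if an extent overlapping @pe already exists.
 */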
int insert_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes);
}

int insert_cache_extent2(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes2);
}

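/*
 * Find an extent overlapping the range [start, start + size), or NULL if
 * there is none.
 */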
struct cache_extent *lookup_cache_extent(struct cache_tree *tree,
					 u64 start, u64 size)
{
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.start = start;
	range.size = size;
	node = rb_search(&tree->root, &range, cache_tree_comp_range, NULL);
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

struct cache_extent *lookup_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start, u64 size)
{
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.objectid = objectid;
	range.start = start;
	range.size = size;
	node = rb_search(&tree->root, &range, cache_tree_comp_range2, NULL);
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

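/*
 * Find the extent covering @start. If no extent covers it, fall back to the
 * next node reported by rb_search() (the first extent starting after
 * @start), or NULL when the tree holds nothing at or after @start.
 */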
struct cache_extent *search_cache_extent(struct cache_tree *tree, u64 start)
{
	struct rb_node *next;
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.start = start;
	range.size = 1;
	node = rb_search(&tree->root, &range, cache_tree_comp_range, &next);
	if (!node)
		node = next;
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

struct cache_extent *search_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start)
{
	struct rb_node *next;
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.objectid = objectid;
	range.start = start;
	range.size = 1;
	node = rb_search(&tree->root, &range, cache_tree_comp_range2, &next);
	if (!node)
		node = next;
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

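/* Simple iterators: first/last extent of a tree, previous/next neighbour. */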
struct cache_extent *first_cache_extent(struct cache_tree *tree)
{
	struct rb_node *node = rb_first(&tree->root);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *last_cache_extent(struct cache_tree *tree)
{
	struct rb_node *node = rb_last(&tree->root);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *prev_cache_extent(struct cache_extent *pe)
{
	struct rb_node *node = rb_prev(&pe->rb_node);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *next_cache_extent(struct cache_extent *pe)
{
	struct rb_node *node = rb_next(&pe->rb_node);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

void remove_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	rb_erase(&pe->rb_node, &tree->root);
}

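/*
 * Remove every extent from @tree and pass each one to @free_func for
 * cleanup.
 */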
void cache_tree_free_extents(struct cache_tree *tree,
			     free_cache_extent free_func)
{
	struct cache_extent *ce;

	while ((ce = first_cache_extent(tree))) {
		remove_cache_extent(tree, ce);
		free_func(ce);
	}
}

static void free_extent_cache(struct cache_extent *pe)
{
	free(pe);
}

void free_extent_cache_tree(struct cache_tree *tree)
{
	cache_tree_free_extents(tree, free_extent_cache);
}

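/*
 * Insert the range [start, start + size) and merge it with adjacent extents
 * that touch it. The new range must not overlap any existing extent.
 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	struct cache_tree tree;
 *
 *	cache_tree_init(&tree);
 *	add_merge_cache_extent(&tree, 0, 4096);
 *	add_merge_cache_extent(&tree, 4096, 4096);	// merged into [0, 8192)
 *	free_extent_cache_tree(&tree);
 */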
int add_merge_cache_extent(struct cache_tree *tree, u64 start, u64 size)
{
	struct cache_extent *cache;
	struct cache_extent *next = NULL;
	struct cache_extent *prev = NULL;
	int next_merged = 0;
	int prev_merged = 0;
	int ret = 0;

	if (cache_tree_empty(tree))
		goto insert;

	cache = search_cache_extent(tree, start);
	if (!cache) {
		/*
		 * Either the tree is completely empty, or there is no range
		 * after @start. Either way, the last cache_extent should be
		 * prev.
		 */
		prev = last_cache_extent(tree);
	} else if (start <= cache->start) {
		next = cache;
		prev = prev_cache_extent(cache);
	} else {
		prev = cache;
		next = next_cache_extent(cache);
	}

	/*
	 * Ensure the range to be inserted doesn't overlap with any existing
	 * extent, or we would need an extra loop to do the merge.
	 */
	BUG_ON(next && start + size > next->start);
	BUG_ON(prev && prev->start + prev->size > start);

	if (next && start + size == next->start) {
		next_merged = 1;
		next->size = next->start + next->size - start;
		next->start = start;
	}
	if (prev && prev->start + prev->size == start) {
		prev_merged = 1;
		if (next_merged) {
			next->size = next->start + next->size - prev->start;
			next->start = prev->start;
			remove_cache_extent(tree, prev);
			free(prev);
		} else {
			prev->size = start + size - prev->start;
		}
	}
insert:
	if (!prev_merged && !next_merged)
		ret = add_cache_extent(tree, start, size);
	return ret;
}