// SPDX-License-Identifier: GPL-2.0+
/*
 * BTRFS filesystem implementation for U-Boot
 *
 * 2017 Marek Behún, CZ.NIC, kabel@kernel.org
 */

#include <linux/kernel.h>
#include <linux/bug.h>
#include <malloc.h>
#include <memalign.h>
#include "btrfs.h"
#include "ctree.h"
#include "extent-io.h"
#include "disk-io.h"

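/*
 * Initialize an extent_io_tree: one cache tree tracks extent state ranges,
 * the other caches extent buffers, with the total cached size starting at
 * zero.
 */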
void extent_io_tree_init(struct extent_io_tree *tree)
{
	cache_tree_init(&tree->state);
	cache_tree_init(&tree->cache);
	tree->cache_size = 0;
}

static struct extent_state *alloc_extent_state(void)
{
	struct extent_state *state;

	state = malloc(sizeof(*state));
	if (!state)
		return NULL;
	state->cache_node.objectid = 0;
	state->refs = 1;
	state->state = 0;
	state->xprivate = 0;
	return state;
}

static void btrfs_free_extent_state(struct extent_state *state)
{
	state->refs--;
	BUG_ON(state->refs < 0);
	if (state->refs == 0)
		free(state);
}

static void free_extent_state_func(struct cache_extent *cache)
{
	struct extent_state *es;

	es = container_of(cache, struct extent_state, cache_node);
	btrfs_free_extent_state(es);
}

static void free_extent_buffer_final(struct extent_buffer *eb);
void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
	cache_tree_free_extents(&tree->state, free_extent_state_func);
}

static inline void update_extent_state(struct extent_state *state)
{
	state->cache_node.start = state->start;
	state->cache_node.size = state->end + 1 - state->start;
}

/*
 * Utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IOBITS set in their state field
 * are not merged.
 */
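/*
 * For example, two adjacent DIRTY-only states [0, 4095] and [4096, 8191]
 * collapse into a single [0, 8191] entry.
 */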
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct cache_extent *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = prev_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			update_extent_state(state);
			remove_cache_extent(&tree->state, &other->cache_node);
			btrfs_free_extent_state(other);
		}
	}
	other_node = next_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			update_extent_state(other);
			remove_cache_extent(&tree->state, &state->cache_node);
			btrfs_free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	int ret;

	BUG_ON(end < start);
	state->state |= bits;
	state->start = start;
	state->end = end;
	update_extent_state(state);
	ret = insert_cache_extent(&tree->state, &state->cache_node);
	BUG_ON(ret);
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 */
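/*
 * Example: splitting [0, 8191] at 4096 shrinks 'orig' to [4096, 8191] and
 * inserts 'prealloc' as [0, 4095]; both halves keep the original state bits.
 */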
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	int ret;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	update_extent_state(prealloc);
	orig->start = split;
	update_extent_state(orig);
	ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
	BUG_ON(ret);
	return 0;
}

/*
 * Utility function to clear some bits in an extent state struct.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (state->state == 0) {
		remove_cache_extent(&tree->state, &state->cache_node);
		btrfs_free_extent_state(state);
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_set) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

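/*
 * Illustration for extent_buffer_bitmap_set(): with pos = 6 and len = 4,
 * the loop sets bits 6-7 of the first byte via mask 0xC0, then the tail
 * sets bits 0-1 of the following byte via mask 0x03 (bitmaps are
 * little-endian within each byte).
 */
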
/*
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_clear) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		p++;
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		*p &= ~mask_to_clear;
	}
}

/*
 * clear some bits on a range in the tree.
 */
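/*
 * Returns the mask of bits that were actually set (and therefore cleared)
 * anywhere in the range, 0 if none were, or -ENOMEM if a state record
 * cannot be allocated.
 */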
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start > end)
		goto out;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set |= clear_state_bit(tree, prealloc, bits);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	goto again;
}

/*
 * set some bits on a range in the tree.
 */
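/*
 * Returns 0 on success or -ENOMEM if a state record cannot be allocated.
 */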
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	int err = 0;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		goto out;
	}

	state = container_of(node, struct extent_state, cache_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just set the bits on what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		state->state |= bits;
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}
	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;

		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | ---------- state ---------- |
	 *
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	err = split_state(tree, state, prealloc, end + 1);
	BUG_ON(err == -EEXIST);

	state->state |= bits;
	merge_state(tree, prealloc);
	prealloc = NULL;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return err;
search_again:
	if (start > end)
		goto out;
	goto again;
}

int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}

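/*
 * Find the first state in the tree ending at or after @start that has any
 * of the given bits set. Returns 0 and stores its range in
 * @start_ret/@end_ret, or 1 if none is found.
 */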
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 1;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;

	while (1) {
		state = container_of(node, struct extent_state, cache_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = next_cache_extent(node);
		if (!node)
			break;
	}
out:
	return ret;
}

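/*
 * Test whether a range has the given bits set. With @filled, every part of
 * [@start, @end] must be covered by states carrying the bits; without it,
 * a single matching state anywhere in the range is enough.
 */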
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct cache_extent *node;
	int bitset = 0;

	node = search_cache_extent(&tree->state, start);
	while (node && start <= end) {
		state = container_of(node, struct extent_state, cache_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = next_cache_extent(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	return bitset;
}

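/*
 * set_state_private() and get_state_private() stash and retrieve a
 * caller-private value on the state record starting exactly at @start;
 * both return -ENOENT if no such record exists.
 */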
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->xprivate = private;
out:
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->xprivate;
out:
	return ret;
}

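/*
 * Allocate an extent buffer with a cache-aligned, zeroed data area. The
 * buffer starts with one reference and is not yet linked into any cache
 * tree.
 */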
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
						   u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;

	eb = calloc(1, sizeof(struct extent_buffer));
	if (!eb)
		return NULL;
	eb->data = malloc_cache_aligned(blocksize);
	if (!eb->data) {
		free(eb);
		return NULL;
	}

	eb->start = bytenr;
	eb->len = blocksize;
	eb->refs = 1;
	eb->flags = 0;
	eb->cache_node.start = bytenr;
	eb->cache_node.size = blocksize;
	eb->fs_info = info;
	memset_extent_buffer(eb, 0, 0, blocksize);

	return eb;
}

struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	struct extent_buffer *new;

	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (!new)
		return NULL;

	copy_extent_buffer(new, src, 0, 0, src->len);
	new->flags |= EXTENT_BUFFER_DUMMY;

	return new;
}

static void free_extent_buffer_final(struct extent_buffer *eb)
{
	BUG_ON(eb->refs);
	if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
		struct extent_io_tree *tree = &eb->fs_info->extent_cache;

		remove_cache_extent(&tree->cache, &eb->cache_node);
		BUG_ON(tree->cache_size < eb->len);
		tree->cache_size -= eb->len;
	}
	free(eb->data);
	free(eb);
}

static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
	if (!eb || IS_ERR(eb))
		return;

	eb->refs--;
	BUG_ON(eb->refs < 0);
	if (eb->refs == 0) {
		if (eb->flags & EXTENT_DIRTY) {
			error(
"dirty eb leak (aborted trans): start %llu len %u",
			      eb->start, eb->len);
		}
		if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
			free_extent_buffer_final(eb);
	}
}

void free_extent_buffer(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, 1);
}

struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
					       u64 start)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = search_cache_extent(&tree->cache, start);
	if (cache) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

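/*
 * Look up an extent buffer covering [bytenr, bytenr + blocksize) in the
 * cache, taking an extra reference on a hit; otherwise allocate a fresh
 * one and insert it. A stale, partially overlapping entry has a reference
 * dropped first.
 */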
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &fs_info->extent_cache;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	} else {
		int ret;

		if (cache) {
			eb = container_of(cache, struct extent_buffer,
					  cache_node);
			free_extent_buffer(eb);
		}
		eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
		if (!eb)
			return NULL;
		ret = insert_cache_extent(&tree->cache, &eb->cache_node);
		if (ret) {
			/* Not inserted into the cache, free both allocations */
			free(eb->data);
			free(eb);
			return NULL;
		}
		tree->cache_size += blocksize;
	}
	return eb;
}

/*
 * Allocate a dummy extent buffer which won't be inserted into the extent
 * buffer cache.
 *
 * This mostly allows super block read/write using the existing eb
 * infrastructure without polluting the eb cache.
 *
 * This is especially important to avoid inserting eb->start == SZ_64K, as a
 * fuzzed image could have an invalid tree bytenr that covers the super block
 * range and causes a ref count underflow.
 */
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 bytenr, u32 blocksize)
{
	struct extent_buffer *ret;

	ret = __alloc_extent_buffer(fs_info, bytenr, blocksize);
	if (!ret)
		return NULL;

	ret->flags |= EXTENT_BUFFER_DUMMY;

	return ret;
}

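/*
 * Read @len bytes at device offset @physical into the buffer at @offset.
 * Returns 0 on success, -EIO on a short read, or the negative error
 * returned by the underlying device read.
 */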
int read_extent_from_disk(struct blk_desc *desc, struct disk_partition *part,
			  u64 physical, struct extent_buffer *eb,
			  unsigned long offset, unsigned long len)
{
	int ret;

	ret = __btrfs_devread(desc, part, eb->data + offset, len, physical);
	if (ret < 0)
		goto out;
	if (ret != len) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	return ret;
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	return memcmp(eb->data + start, ptrv, len);
}

void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start, unsigned long len)
{
	memcpy(dst, eb->data + start, len);
}

void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len)
{
	memcpy(eb->data + start, src, len);
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	memcpy(dst->data + dst_offset, src->data + src_offset, len);
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	memmove(dst->data + dst_offset, dst->data + src_offset, len);
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	memset(eb->data + start, c, len);
}

int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	return le_test_bit(nr, (u8 *)eb->data + start);
}

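/*
 * Mark an extent buffer dirty: record its range in the fs-wide dirty tree
 * and pin the buffer with an extra reference until it is cleaned again by
 * clear_extent_buffer_dirty().
 */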
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (!(eb->flags & EXTENT_DIRTY)) {
		eb->flags |= EXTENT_DIRTY;
		set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		extent_buffer_get(eb);
	}
	return 0;
}

int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (eb->flags & EXTENT_DIRTY) {
		eb->flags &= ~EXTENT_DIRTY;
		clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		free_extent_buffer(eb);
	}
	return 0;
}