// SPDX-License-Identifier: GPL-2.0+
/*
 * BTRFS filesystem implementation for U-Boot
 *
 * 2017 Marek Behun, CZ.NIC, marek.behun@nic.cz
 */

#include <linux/kernel.h>
#include <linux/bug.h>
#include <malloc.h>
#include <memalign.h>
#include "btrfs.h"
#include "ctree.h"
#include "extent-io.h"
#include "disk-io.h"

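/*
 * Read file data from an inline file extent.
 *
 * Copies up to @size bytes of the extent's (possibly compressed) inline data
 * into @out, starting at @offset within the uncompressed data.  Returns the
 * number of bytes read, or -1ULL on error.
 */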
u64 btrfs_read_extent_inline(struct btrfs_path *path,
			     struct btrfs_file_extent_item *extent, u64 offset,
			     u64 size, char *out)
{
	u32 clen, dlen, orig_size = size, res;
	const char *cbuf;
	char *dbuf;
	const int data_off = offsetof(struct btrfs_file_extent_item,
				      disk_bytenr);

	clen = btrfs_path_item_size(path) - data_off;
	cbuf = (const char *) extent + data_off;
	dlen = extent->ram_bytes;

	if (offset > dlen)
		return -1ULL;

	if (size > dlen - offset)
		size = dlen - offset;

	if (extent->compression == BTRFS_COMPRESS_NONE) {
		memcpy(out, cbuf + offset, size);
		return size;
	}

	if (dlen > orig_size) {
		dbuf = malloc(dlen);
		if (!dbuf)
			return -1ULL;
	} else {
		dbuf = out;
	}

	res = btrfs_decompress(extent->compression, cbuf, clen, dbuf, dlen);
	if (res == -1 || res != dlen)
		goto err;

	if (dlen > orig_size) {
		memcpy(out, dbuf + offset, size);
		free(dbuf);
	} else if (offset) {
		memmove(out, dbuf + offset, size);
	}

	return size;

err:
	if (dlen > orig_size)
		free(dbuf);
	return -1ULL;
}

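/*
 * Read file data from a regular (on-disk) file extent.
 *
 * The extent's logical address is mapped to a physical one and read from the
 * underlying device, decompressing into @out when the extent is compressed.
 * Returns the number of bytes read on success (the decompressed length for
 * compressed extents), or -1ULL on error.
 */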
u64 btrfs_read_extent_reg(struct btrfs_path *path,
			  struct btrfs_file_extent_item *extent, u64 offset,
			  u64 size, char *out)
{
	u64 physical, clen, dlen, orig_size = size;
	u32 res;
	char *cbuf, *dbuf;

	clen = extent->disk_num_bytes;
	dlen = extent->num_bytes;

	if (offset > dlen)
		return -1ULL;

	if (size > dlen - offset)
		size = dlen - offset;

	/* sparse extent */
	if (extent->disk_bytenr == 0) {
		memset(out, 0, size);
		return size;
	}

	physical = btrfs_map_logical_to_physical(extent->disk_bytenr);
	if (physical == -1ULL)
		return -1ULL;

	if (extent->compression == BTRFS_COMPRESS_NONE) {
		physical += extent->offset + offset;
		if (!btrfs_devread(physical, size, out))
			return -1ULL;

		return size;
	}

	cbuf = malloc_cache_aligned(dlen > size ? clen + dlen : clen);
	if (!cbuf)
		return -1ULL;

	if (dlen > orig_size)
		dbuf = cbuf + clen;
	else
		dbuf = out;

	if (!btrfs_devread(physical, clen, cbuf))
		goto err;

	res = btrfs_decompress(extent->compression, cbuf, clen, dbuf, dlen);
	if (res == -1)
		goto err;

	if (dlen > orig_size)
		memcpy(out, dbuf + offset, size);
	else
		memmove(out, dbuf + offset, size);

	free(cbuf);
	return res;

err:
	free(cbuf);
	return -1ULL;
}

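/* Initialize the extent state and extent buffer cache trees of @tree. */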
void extent_io_tree_init(struct extent_io_tree *tree)
{
	cache_tree_init(&tree->state);
	cache_tree_init(&tree->cache);
	tree->cache_size = 0;
}

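/* Allocate a new extent_state with a single reference and no bits set. */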
static struct extent_state *alloc_extent_state(void)
{
	struct extent_state *state;

	state = malloc(sizeof(*state));
	if (!state)
		return NULL;
	state->cache_node.objectid = 0;
	state->refs = 1;
	state->state = 0;
	state->xprivate = 0;
	return state;
}

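/* Drop one reference on @state and free it once the refcount reaches zero. */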
static void btrfs_free_extent_state(struct extent_state *state)
{
	state->refs--;
	BUG_ON(state->refs < 0);
	if (state->refs == 0)
		free(state);
}

static void free_extent_state_func(struct cache_extent *cache)
{
	struct extent_state *es;

	es = container_of(cache, struct extent_state, cache_node);
	btrfs_free_extent_state(es);
}

static void free_extent_buffer_final(struct extent_buffer *eb);
void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
	cache_tree_free_extents(&tree->state, free_extent_state_func);
}

static inline void update_extent_state(struct extent_state *state)
{
	state->cache_node.start = state->start;
	state->cache_node.size = state->end + 1 - state->start;
}

/*
 * Utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IO in their state field are
 * not merged.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct cache_extent *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = prev_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			update_extent_state(state);
			remove_cache_extent(&tree->state, &other->cache_node);
			btrfs_free_extent_state(other);
		}
	}
	other_node = next_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			update_extent_state(other);
			remove_cache_extent(&tree->state, &state->cache_node);
			btrfs_free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	int ret;

	BUG_ON(end < start);
	state->state |= bits;
	state->start = start;
	state->end = end;
	update_extent_state(state);
	ret = insert_cache_extent(&tree->state, &state->cache_node);
	BUG_ON(ret);
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	int ret;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	update_extent_state(prealloc);
	orig->start = split;
	update_extent_state(orig);
	ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
	BUG_ON(ret);
	return 0;
}

/*
 * clear some bits on a range in the tree.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (state->state == 0) {
		remove_cache_extent(&tree->state, &state->cache_node);
		btrfs_free_extent_state(state);
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_set) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

/*
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_clear) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		p++;
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		*p &= ~mask_to_clear;
	}
}

/*
 * clear some bits on a range in the tree.
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start > end)
		goto out;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set |= clear_state_bit(tree, prealloc, bits);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	goto again;
}

/*
 * set some bits on a range in the tree.
 */
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	int err = 0;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		goto out;
	}

	state = container_of(node, struct extent_state, cache_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		state->state |= bits;
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *  | state |
	 * or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;

		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | ---------- state ---------- |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	err = split_state(tree, state, prealloc, end + 1);
	BUG_ON(err == -EEXIST);

	state->state |= bits;
	merge_state(tree, prealloc);
	prealloc = NULL;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return err;
search_again:
	if (start > end)
		goto out;
	goto again;
}

int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}

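/*
 * Find the first extent state at or after @start that has any of @bits set.
 * On success the range is returned in *@start_ret and *@end_ret and 0 is
 * returned; 1 is returned when no such extent exists.
 */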
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 1;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;

	while (1) {
		state = container_of(node, struct extent_state, cache_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = next_cache_extent(node);
		if (!node)
			break;
	}
out:
	return ret;
}

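/*
 * Test whether @bits are set in the range [@start, @end].  If @filled is
 * non-zero, every part of the range must be covered by states with the bits
 * set; otherwise a single matching state is enough.
 */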
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct cache_extent *node;
	int bitset = 0;

	node = search_cache_extent(&tree->state, start);
	while (node && start <= end) {
		state = container_of(node, struct extent_state, cache_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = next_cache_extent(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	return bitset;
}

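/*
 * Store @private in the extent state that starts exactly at @start.
 * Returns -ENOENT if no such state exists.
 */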
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->xprivate = private;
out:
	return ret;
}

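/*
 * Retrieve the private value stored in the extent state that starts exactly
 * at @start.  Returns -ENOENT if no such state exists.
 */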
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->xprivate;
out:
	return ret;
}

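/*
 * Allocate an extent buffer of @blocksize bytes for @bytenr, with a
 * cache-aligned, zeroed data area and a single reference held.
 */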
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
						   u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;

	eb = calloc(1, sizeof(struct extent_buffer));
	if (!eb)
		return NULL;
	eb->data = malloc_cache_aligned(blocksize);
	if (!eb->data) {
		free(eb);
		return NULL;
	}

	eb->start = bytenr;
	eb->len = blocksize;
	eb->refs = 1;
	eb->flags = 0;
	eb->cache_node.start = bytenr;
	eb->cache_node.size = blocksize;
	eb->fs_info = info;
	memset_extent_buffer(eb, 0, 0, blocksize);

	return eb;
}

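/*
 * Create a private (EXTENT_BUFFER_DUMMY) copy of @src that is not tracked
 * in the extent buffer cache.
 */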
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	struct extent_buffer *new;

	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (!new)
		return NULL;

	copy_extent_buffer(new, src, 0, 0, src->len);
	new->flags |= EXTENT_BUFFER_DUMMY;

	return new;
}

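/*
 * Actually release an extent buffer whose reference count has dropped to
 * zero, removing it from the extent buffer cache unless it is a dummy buffer.
 */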
static void free_extent_buffer_final(struct extent_buffer *eb)
{
	BUG_ON(eb->refs);
	if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
		struct extent_io_tree *tree = &eb->fs_info->extent_cache;

		remove_cache_extent(&tree->cache, &eb->cache_node);
		BUG_ON(tree->cache_size < eb->len);
		tree->cache_size -= eb->len;
	}
	free(eb->data);
	free(eb);
}

static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
	if (!eb || IS_ERR(eb))
		return;

	eb->refs--;
	BUG_ON(eb->refs < 0);
	if (eb->refs == 0) {
		if (eb->flags & EXTENT_DIRTY) {
			error(
			"dirty eb leak (aborted trans): start %llu len %u",
			      eb->start, eb->len);
		}
		if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
			free_extent_buffer_final(eb);
	}
}

void free_extent_buffer(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, 1);
}

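/*
 * Look up an extent buffer that exactly matches @bytenr/@blocksize in the
 * cache.  Takes an extra reference on the buffer if found.
 */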
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
					       u64 start)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = search_cache_extent(&tree->cache, start);
	if (cache) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

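/*
 * Return the cached extent buffer for @bytenr/@blocksize, allocating and
 * inserting a new one (and dropping a reference on any stale buffer that
 * merely overlaps the range) when no exact match is cached.
 */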
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &fs_info->extent_cache;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	} else {
		int ret;

		if (cache) {
			eb = container_of(cache, struct extent_buffer,
					  cache_node);
			free_extent_buffer(eb);
		}
		eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
		if (!eb)
			return NULL;
		ret = insert_cache_extent(&tree->cache, &eb->cache_node);
		if (ret) {
			free(eb);
			return NULL;
		}
		tree->cache_size += blocksize;
	}
	return eb;
}

/*
 * Allocate a dummy extent buffer which won't be inserted into the extent
 * buffer cache.
 *
 * This mostly allows super block read/write using the existing eb
 * infrastructure without polluting the eb cache.
 *
 * This is especially important to avoid injecting eb->start == SZ_64K, as a
 * fuzzed image could have an invalid tree bytenr that covers the super block
 * range and causes a ref count underflow.
 */
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 bytenr, u32 blocksize)
{
	struct extent_buffer *ret;

	ret = __alloc_extent_buffer(fs_info, bytenr, blocksize);
	if (!ret)
		return NULL;

	ret->flags |= EXTENT_BUFFER_DUMMY;

	return ret;
}

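/*
 * Read @len bytes at device address @physical into the extent buffer data
 * at @offset.  Returns 0 on success or a negative error code.
 */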
int read_extent_from_disk(struct blk_desc *desc, struct disk_partition *part,
			  u64 physical, struct extent_buffer *eb,
			  unsigned long offset, unsigned long len)
{
	int ret;

	ret = __btrfs_devread(desc, part, eb->data + offset, len, physical);
	if (ret < 0)
		goto out;
	if (ret != len) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	return ret;
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	return memcmp(eb->data + start, ptrv, len);
}

void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start, unsigned long len)
{
	memcpy(dst, eb->data + start, len);
}

void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len)
{
	memcpy(eb->data + start, src, len);
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	memcpy(dst->data + dst_offset, src->data + src_offset, len);
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	memmove(dst->data + dst_offset, dst->data + src_offset, len);
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	memset(eb->data + start, c, len);
}

int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	return le_test_bit(nr, (u8 *)eb->data + start);
}

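/*
 * Mark an extent buffer dirty: set EXTENT_DIRTY on the buffer and on its
 * range in the fs_info extent cache tree, and grab an extra reference via
 * extent_buffer_get().
 */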
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (!(eb->flags & EXTENT_DIRTY)) {
		eb->flags |= EXTENT_DIRTY;
		set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		extent_buffer_get(eb);
	}
	return 0;
}

int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (eb->flags & EXTENT_DIRTY) {
		eb->flags &= ~EXTENT_DIRTY;
		clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		free_extent_buffer(eb);
	}
	return 0;
}
925}