// SPDX-License-Identifier: GPL-2.0+
/*
 * BTRFS filesystem implementation for U-Boot
 *
 * 2017 Marek Behun, CZ.NIC, marek.behun@nic.cz
 */

#include <linux/kernel.h>
#include <malloc.h>
#include <memalign.h>
#include "btrfs.h"
#include "disk-io.h"
#include "volumes.h"
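
/*
 * Look up the INODE_REF item of inode @inr in @root.
 *
 * If non-NULL, @refp receives a copy of the ref item and @name receives the
 * (not \0 terminated) file name the ref points at.
 *
 * Return the objectid of the parent directory.
 * Return -1ULL for error.
 */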
u64 __btrfs_lookup_inode_ref(struct __btrfs_root *root, u64 inr,
                             struct btrfs_inode_ref *refp, char *name)
{
        struct __btrfs_path path;
        struct btrfs_key *key;
        struct btrfs_inode_ref *ref;
        u64 res = -1ULL;

        key = btrfs_search_tree_key_type(root, inr, BTRFS_INODE_REF_KEY,
                                         &path);

        if (!key)
                return -1ULL;

        ref = btrfs_path_item_ptr(&path, struct btrfs_inode_ref);
        btrfs_inode_ref_to_cpu(ref);

        if (refp)
                *refp = *ref;

        if (name) {
                if (ref->name_len > BTRFS_NAME_LEN) {
                        printf("%s: inode name too long: %u\n", __func__,
                               ref->name_len);
                        goto out;
                }

                memcpy(name, ref + 1, ref->name_len);
        }

        res = key->offset;
out:
        __btrfs_free_path(&path);
        return res;
}
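
/*
 * Look up the inode item at @location in @root and copy it to @item.
 *
 * If @location is a ROOT_ITEM key, it is first resolved to the root
 * directory of that subvolume. If non-NULL, @new_root receives the
 * (possibly switched) root the inode was found in.
 *
 * Return 0 for success.
 * Return -1 for error.
 */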
int __btrfs_lookup_inode(const struct __btrfs_root *root,
                         struct btrfs_key *location,
                         struct btrfs_inode_item *item,
                         struct __btrfs_root *new_root)
{
        struct __btrfs_root tmp_root = *root;
        struct __btrfs_path path;
        int res = -1;

        if (location->type == BTRFS_ROOT_ITEM_KEY) {
                if (btrfs_find_root(location->objectid, &tmp_root, NULL))
                        return -1;

                location->objectid = tmp_root.root_dirid;
                location->type = BTRFS_INODE_ITEM_KEY;
                location->offset = 0;
        }

        if (btrfs_search_tree(&tmp_root, location, &path))
                return res;

        if (__btrfs_comp_keys(location, btrfs_path_leaf_key(&path)))
                goto out;

        if (item) {
                *item = *btrfs_path_item_ptr(&path, struct btrfs_inode_item);
                btrfs_inode_item_to_cpu(item);
        }

        if (new_root)
                *new_root = tmp_root;

        res = 0;

out:
        __btrfs_free_path(&path);
        return res;
}

/*
 * Read the content of symlink inode @ino of @root into @target.
 *
 * NOTE: @target will not be \0 terminated, the caller should handle that
 * properly.
 *
 * Return the number of bytes read.
 * Return <0 for error.
 */
int btrfs_readlink(struct btrfs_root *root, u64 ino, char *target)
{
        struct btrfs_path path;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int ret;

        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = 0;
        btrfs_init_path(&path);

        ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
        if (ret < 0)
                return ret;
        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }
        fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
                            struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(path.nodes[0], fi) !=
            BTRFS_FILE_EXTENT_INLINE) {
                ret = -EUCLEAN;
                error("Extent for symlink %llu must be INLINE type!", ino);
                goto out;
        }
        if (btrfs_file_extent_compression(path.nodes[0], fi) !=
            BTRFS_COMPRESS_NONE) {
                ret = -EUCLEAN;
                error("Extent for symlink %llu must not be compressed!", ino);
                goto out;
        }
        if (btrfs_file_extent_ram_bytes(path.nodes[0], fi) >=
            root->fs_info->sectorsize) {
                ret = -EUCLEAN;
                error("Symlink %llu extent data too large (%llu)!\n",
                      ino, btrfs_file_extent_ram_bytes(path.nodes[0], fi));
                goto out;
        }
        read_extent_buffer(path.nodes[0], target,
                           btrfs_file_extent_inline_start(fi),
                           btrfs_file_extent_ram_bytes(path.nodes[0], fi));
        ret = btrfs_file_extent_ram_bytes(path.nodes[0], fi);
out:
        btrfs_release_path(&path);
        return ret;
}
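
/*
 * Read the symlink target of inode @inr in the subvolume described by @root
 * into @target and \0 terminate it.
 *
 * Return 0 for success.
 * Return -1 for error.
 */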
int __btrfs_readlink(const struct __btrfs_root *root, u64 inr, char *target)
{
        struct btrfs_root *subvolume;
        struct btrfs_fs_info *fs_info = current_fs_info;
        struct btrfs_key key;
        int ret;

        ASSERT(fs_info);
        key.objectid = root->objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
        subvolume = btrfs_read_fs_root(fs_info, &key);
        if (IS_ERR(subvolume))
                return -1;

        ret = btrfs_readlink(subvolume, inr, target);
        if (ret < 0)
                return -1;
        target[ret] = '\0';
        return 0;
}
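
/*
 * Look up the ROOT_BACKREF item of subvolume @rootid in the tree root.
 *
 * @root_ret: objectid of the parent subvolume.
 * @dir_ret:  directory inode (inside the parent subvolume) that contains
 *            this subvolume.
 *
 * Return 0 for success.
 * Return <0 for error.
 */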
static int lookup_root_ref(struct btrfs_fs_info *fs_info,
                           u64 rootid, u64 *root_ret, u64 *dir_ret)
{
        struct btrfs_root *root = fs_info->tree_root;
        struct btrfs_root_ref *root_ref;
        struct btrfs_path path;
        struct btrfs_key key;
        int ret;

        btrfs_init_path(&path);
        key.objectid = rootid;
        key.type = BTRFS_ROOT_BACKREF_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
        if (ret < 0)
                return ret;
        /* Should not happen */
        if (ret == 0) {
                ret = -EUCLEAN;
                goto out;
        }
        ret = btrfs_previous_item(root, &path, rootid, BTRFS_ROOT_BACKREF_KEY);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }
        btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
        root_ref = btrfs_item_ptr(path.nodes[0], path.slots[0],
                                  struct btrfs_root_ref);
        *root_ret = key.offset;
        *dir_ret = btrfs_root_ref_dirid(path.nodes[0], root_ref);
out:
        btrfs_release_path(&path);
        return ret;
}

/*
 * Get the parent inode of inode @ino of @root.
 *
 * @root_ret and @ino_ret will be filled.
 *
 * NOTE: This function is not fully reliable, as it can only return one
 * parent inode. To get the proper parent inode we would need a full VFS
 * inode stack to resolve the path properly.
 */
static int get_parent_inode(struct btrfs_root *root, u64 ino,
                            struct btrfs_root **root_ret, u64 *ino_ret)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_path path;
        struct btrfs_key key;
        int ret;

        if (ino == BTRFS_FIRST_FREE_OBJECTID) {
                u64 parent_root = -1;

                /* It's top level already, no more parent */
                if (root->root_key.objectid == BTRFS_FS_TREE_OBJECTID) {
                        *root_ret = fs_info->fs_root;
                        *ino_ret = BTRFS_FIRST_FREE_OBJECTID;
                        return 0;
                }

                ret = lookup_root_ref(fs_info, root->root_key.objectid,
                                      &parent_root, ino_ret);
                if (ret < 0)
                        return ret;

                key.objectid = parent_root;
                key.type = BTRFS_ROOT_ITEM_KEY;
                key.offset = (u64)-1;
                *root_ret = btrfs_read_fs_root(fs_info, &key);
                if (IS_ERR(*root_ret))
                        return PTR_ERR(*root_ret);

                return 0;
        }

        btrfs_init_path(&path);
        key.objectid = ino;
        key.type = BTRFS_INODE_REF_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
        if (ret < 0)
                return ret;
        /* Should not happen */
        if (ret == 0) {
                ret = -EUCLEAN;
                goto out;
        }
        ret = btrfs_previous_item(root, &path, ino, BTRFS_INODE_REF_KEY);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }
        btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
        *root_ret = root;
        *ino_ret = key.offset;
out:
        btrfs_release_path(&path);
        return ret;
}

/*
 * inr must be a directory (for regular files with multiple hard links this
 * function returns only one of the parents of the file).
 */
static u64 __get_parent_inode(struct __btrfs_root *root, u64 inr,
                              struct btrfs_inode_item *inode_item)
{
        struct btrfs_key key;
        u64 res;

        if (inr == BTRFS_FIRST_FREE_OBJECTID) {
                if (root->objectid != btrfs_info.fs_root.objectid) {
                        u64 parent;
                        struct btrfs_root_ref ref;

                        parent = btrfs_lookup_root_ref(root->objectid, &ref,
                                                       NULL);
                        if (parent == -1ULL)
                                return -1ULL;

                        if (btrfs_find_root(parent, root, NULL))
                                return -1ULL;

                        inr = ref.dirid;
                }

                if (inode_item) {
                        key.objectid = inr;
                        key.type = BTRFS_INODE_ITEM_KEY;
                        key.offset = 0;

                        if (__btrfs_lookup_inode(root, &key, inode_item, NULL))
                                return -1ULL;
                }

                return inr;
        }

        res = __btrfs_lookup_inode_ref(root, inr, NULL, NULL);
        if (res == -1ULL)
                return -1ULL;

        if (inode_item) {
                key.objectid = res;
                key.type = BTRFS_INODE_ITEM_KEY;
                key.offset = 0;

                if (__btrfs_lookup_inode(root, &key, inode_item, NULL))
                        return -1ULL;
        }

        return res;
}
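
/*
 * Return the length of the current path component (up to the next '/' or
 * '\0'). Counting stops once the length exceeds BTRFS_NAME_LEN, so callers
 * can detect over-long names.
 */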
static inline int next_length(const char *path)
{
        int res = 0;
        while (*path != '\0' && *path != '/') {
                ++res;
                ++path;
                if (res > BTRFS_NAME_LEN)
                        break;
        }
        return res;
}
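
/* Skip any leading '/' separators and "./" references in @cur. */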
static inline const char *skip_current_directories(const char *cur)
{
        while (1) {
                if (cur[0] == '/')
                        ++cur;
                else if (cur[0] == '.' && cur[1] == '/')
                        cur += 2;
                else
                        break;
        }

        return cur;
}

/*
 * Resolve one filename @name (of length @namelen) inside directory inode
 * @ino of @root.
 *
 * @key_ret:  The child key (either INODE_ITEM or ROOT_ITEM type).
 * @type_ret: BTRFS_FT_* type of the child inode.
 *
 * Return 0 with the above parameters filled.
 * Return <0 for error.
 */
static int resolve_one_filename(struct btrfs_root *root, u64 ino,
                                const char *name, int namelen,
                                struct btrfs_key *key_ret, u8 *type_ret)
{
        struct btrfs_dir_item *dir_item;
        struct btrfs_path path;
        int ret = 0;

        btrfs_init_path(&path);

        dir_item = btrfs_lookup_dir_item(NULL, root, &path, ino, name,
                                         namelen, 0);
        if (IS_ERR(dir_item)) {
                ret = PTR_ERR(dir_item);
                goto out;
        }

        btrfs_dir_item_key_to_cpu(path.nodes[0], dir_item, key_ret);
        *type_ret = btrfs_dir_type(path.nodes[0], dir_item);
out:
        btrfs_release_path(&path);
        return ret;
}

/*
 * Resolve the full path @filename, starting from inode @ino of @root.
 *
 * The result will be filled into @root_ret, @ino_ret and @type_ret.
 */
int btrfs_lookup_path(struct btrfs_root *root, u64 ino, const char *filename,
                      struct btrfs_root **root_ret, u64 *ino_ret,
                      u8 *type_ret, int symlink_limit)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *next_root;
        struct btrfs_key key;
        const char *cur = filename;
        u64 next_ino;
        u8 next_type;
        u8 type;
        int len;
        int ret = 0;

        /* If the path is an absolute path, start the search from the fs root */
        if (*cur == '/') {
                root = fs_info->fs_root;
                ino = btrfs_root_dirid(&root->root_item);
                type = BTRFS_FT_DIR;
        }

        while (*cur != '\0') {
                cur = skip_current_directories(cur);

                len = next_length(cur);
                if (len > BTRFS_NAME_LEN) {
                        error("%s: Name too long at \"%.*s\"", __func__,
                              BTRFS_NAME_LEN, cur);
                        return -ENAMETOOLONG;
                }

                if (len == 1 && cur[0] == '.')
                        break;

                if (len == 2 && cur[0] == '.' && cur[1] == '.') {
                        /* Go one level up */
                        ret = get_parent_inode(root, ino, &next_root, &next_ino);
                        if (ret < 0)
                                return ret;
                        root = next_root;
                        ino = next_ino;
                        goto next;
                }

                if (!*cur)
                        break;

                ret = resolve_one_filename(root, ino, cur, len, &key, &type);
                if (ret < 0)
                        return ret;

                if (key.type == BTRFS_ROOT_ITEM_KEY) {
                        /* Child inode is a subvolume */

                        next_root = btrfs_read_fs_root(fs_info, &key);
                        if (IS_ERR(next_root))
                                return PTR_ERR(next_root);
                        root = next_root;
                        ino = btrfs_root_dirid(&root->root_item);
                } else if (type == BTRFS_FT_SYMLINK && symlink_limit >= 0) {
                        /* Child inode is a symlink */

                        char *target;

                        if (symlink_limit == 0) {
                                error("%s: Too many symlinks!", __func__);
                                return -EMLINK;
                        }
                        target = malloc(fs_info->sectorsize);
                        if (!target)
                                return -ENOMEM;
                        ret = btrfs_readlink(root, key.objectid, target);
                        if (ret < 0) {
                                free(target);
                                return ret;
                        }
                        target[ret] = '\0';

                        ret = btrfs_lookup_path(root, ino, target, &next_root,
                                                &next_ino, &next_type,
                                                symlink_limit);
                        if (ret < 0)
                                return ret;
                        root = next_root;
                        ino = next_ino;
                        type = next_type;
                } else {
                        /* Child is a regular inode */
                        ino = key.objectid;
                }
next:
                cur += len;
        }

        if (!ret) {
                *root_ret = root;
                *ino_ret = ino;
                *type_ret = type;
        }

        return ret;
}
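
/*
 * Resolve @path starting from directory inode @inr of @root and return the
 * resulting inode number, or -1ULL on error.
 *
 * If non-NULL, @type_p and @inode_item_p are filled with the BTRFS_FT_* type
 * and the inode item of the result. Symlinks are followed up to
 * @symlink_limit levels deep; a negative @symlink_limit disables following.
 */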
u64 __btrfs_lookup_path(struct __btrfs_root *root, u64 inr, const char *path,
                        u8 *type_p, struct btrfs_inode_item *inode_item_p,
                        int symlink_limit)
{
        struct btrfs_dir_item item;
        struct btrfs_inode_item inode_item;
        u8 type = BTRFS_FT_DIR;
        int len, have_inode = 0;
        const char *cur = path;

        if (*cur == '/') {
                ++cur;
                inr = root->root_dirid;
        }

        do {
                cur = skip_current_directories(cur);

                len = next_length(cur);
                if (len > BTRFS_NAME_LEN) {
                        printf("%s: Name too long at \"%.*s\"\n", __func__,
                               BTRFS_NAME_LEN, cur);
                        return -1ULL;
                }

                if (len == 1 && cur[0] == '.')
                        break;

                if (len == 2 && cur[0] == '.' && cur[1] == '.') {
                        cur += 2;
                        inr = __get_parent_inode(root, inr, &inode_item);
                        if (inr == -1ULL)
                                return -1ULL;

                        type = BTRFS_FT_DIR;
                        continue;
                }

                if (!*cur)
                        break;

                if (__btrfs_lookup_dir_item(root, inr, cur, len, &item))
                        return -1ULL;

                type = item.type;
                have_inode = 1;
                if (__btrfs_lookup_inode(root, (struct btrfs_key *)&item.location,
                                         &inode_item, root))
                        return -1ULL;

                if (item.type == BTRFS_FT_SYMLINK && symlink_limit >= 0) {
                        char *target;

                        if (!symlink_limit) {
                                printf("%s: Too many symlinks!\n", __func__);
                                return -1ULL;
                        }

                        target = malloc(min(inode_item.size + 1,
                                            (u64) btrfs_info.sb.sectorsize));
                        if (!target)
                                return -1ULL;

                        if (__btrfs_readlink(root, item.location.objectid,
                                             target)) {
                                free(target);
                                return -1ULL;
                        }

                        inr = __btrfs_lookup_path(root, inr, target, &type,
                                                  &inode_item, symlink_limit - 1);

                        free(target);

                        if (inr == -1ULL)
                                return -1ULL;
                } else if (item.type != BTRFS_FT_DIR && cur[len]) {
                        printf("%s: \"%.*s\" not a directory\n", __func__,
                               (int) (cur - path + len), path);
                        return -1ULL;
                } else {
                        inr = item.location.objectid;
                }

                cur += len;
        } while (*cur);

        if (type_p)
                *type_p = type;

        if (inode_item_p) {
                if (!have_inode) {
                        struct btrfs_key key;

                        key.objectid = inr;
                        key.type = BTRFS_INODE_ITEM_KEY;
                        key.offset = 0;

                        if (__btrfs_lookup_inode(root, &key, &inode_item, NULL))
                                return -1ULL;
                }

                *inode_item_p = inode_item;
        }

        return inr;
}
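
/*
 * Read @size bytes of file inode @inr of @root starting at file offset
 * @offset into @buf, walking the file's EXTENT_DATA items.
 *
 * Return the number of bytes read.
 * Return -1ULL for error.
 */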
u64 __btrfs_file_read(const struct __btrfs_root *root, u64 inr, u64 offset,
                      u64 size, char *buf)
{
        struct __btrfs_path path;
        struct btrfs_key key;
        struct btrfs_file_extent_item *extent;
        int res = 0;
        u64 rd, rd_all = -1ULL;

        key.objectid = inr;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = offset;

        if (btrfs_search_tree(root, &key, &path))
                return -1ULL;

        if (__btrfs_comp_keys(&key, btrfs_path_leaf_key(&path)) < 0) {
                if (btrfs_prev_slot(&path))
                        goto out;

                if (btrfs_comp_keys_type(&key, btrfs_path_leaf_key(&path)))
                        goto out;
        }

        rd_all = 0;

        do {
                if (btrfs_comp_keys_type(&key, btrfs_path_leaf_key(&path)))
                        break;

                extent = btrfs_path_item_ptr(&path,
                                             struct btrfs_file_extent_item);

                if (extent->type == BTRFS_FILE_EXTENT_INLINE) {
                        btrfs_file_extent_item_to_cpu_inl(extent);
                        rd = __btrfs_read_extent_inline(&path, extent, offset,
                                                        size, buf);
                } else {
                        btrfs_file_extent_item_to_cpu(extent);
                        rd = __btrfs_read_extent_reg(&path, extent, offset,
                                                     size, buf);
                }

                if (rd == -1ULL) {
                        printf("%s: Error reading extent\n", __func__);
                        rd_all = -1;
                        goto out;
                }

                offset = 0;
                buf += rd;
                rd_all += rd;
                size -= rd;

                if (!size)
                        break;
        } while (!(res = btrfs_next_slot(&path)));

        if (res)
                return -1ULL;

out:
        __btrfs_free_path(&path);
        return rd_all;
}

/*
 * Read out an inline extent.
 *
 * Since an inline extent can only exist at file offset 0, there is no need
 * for extra offset/length parameters.
 * Truncating should be handled by the caller.
 *
 * Return the number of bytes read.
 * Return <0 for error.
 */
int btrfs_read_extent_inline(struct btrfs_path *path,
                             struct btrfs_file_extent_item *fi, char *dest)
{
        struct extent_buffer *leaf = path->nodes[0];
        int slot = path->slots[0];
        char *cbuf = NULL;
        char *dbuf = NULL;
        u32 csize;
        u32 dsize;
        int ret;

        csize = btrfs_file_extent_inline_item_len(leaf, btrfs_item_nr(slot));
        if (btrfs_file_extent_compression(leaf, fi) == BTRFS_COMPRESS_NONE) {
                /* Uncompressed, just read it out */
                read_extent_buffer(leaf, dest,
                                   btrfs_file_extent_inline_start(fi),
                                   csize);
                return csize;
        }

        /* Compressed extent, prepare the compressed and data buffer */
        dsize = btrfs_file_extent_ram_bytes(leaf, fi);
        cbuf = malloc(csize);
        dbuf = malloc(dsize);
        if (!cbuf || !dbuf) {
                ret = -ENOMEM;
                goto out;
        }
        read_extent_buffer(leaf, cbuf, btrfs_file_extent_inline_start(fi),
                           csize);
        ret = btrfs_decompress(btrfs_file_extent_compression(leaf, fi),
                               cbuf, csize, dbuf, dsize);
        if (ret < 0 || ret != dsize) {
                ret = -EIO;
                goto out;
        }
        memcpy(dest, dbuf, dsize);
        ret = dsize;
out:
        free(cbuf);
        free(dbuf);
        return ret;
}

/*
 * Read out a regular extent.
 *
 * Truncating should be handled by the caller.
 *
 * @offset and @len should not cross the extent boundary.
 * Return the number of bytes read.
 * Return <0 for error.
 */
int btrfs_read_extent_reg(struct btrfs_path *path,
                          struct btrfs_file_extent_item *fi, u64 offset,
                          int len, char *dest)
{
        struct extent_buffer *leaf = path->nodes[0];
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_key key;
        u64 extent_num_bytes;
        u64 disk_bytenr;
        u64 read;
        char *cbuf = NULL;
        char *dbuf = NULL;
        u32 csize;
        u32 dsize;
        bool finished = false;
        int num_copies;
        int i;
        int slot = path->slots[0];
        int ret;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        extent_num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
        ASSERT(IS_ALIGNED(offset, fs_info->sectorsize) &&
               IS_ALIGNED(len, fs_info->sectorsize));
        ASSERT(offset >= key.offset &&
               offset + len <= key.offset + extent_num_bytes);

        /* Preallocated or hole, fill @dest with zero */
        if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_PREALLOC ||
            btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
                memset(dest, 0, len);
                return len;
        }

        if (btrfs_file_extent_compression(leaf, fi) == BTRFS_COMPRESS_NONE) {
                u64 logical;

                logical = btrfs_file_extent_disk_bytenr(leaf, fi) +
                          btrfs_file_extent_offset(leaf, fi) +
                          offset - key.offset;
                read = len;

                num_copies = btrfs_num_copies(fs_info, logical, len);
                for (i = 1; i <= num_copies; i++) {
                        ret = read_extent_data(fs_info, dest, logical, &read, i);
                        if (ret < 0 || read != len)
                                continue;
                        finished = true;
                        break;
                }
                if (!finished)
                        return -EIO;
                return len;
        }

        csize = btrfs_file_extent_disk_num_bytes(leaf, fi);
        dsize = btrfs_file_extent_ram_bytes(leaf, fi);
        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_copies = btrfs_num_copies(fs_info, disk_bytenr, csize);

        cbuf = malloc_cache_aligned(csize);
        dbuf = malloc_cache_aligned(dsize);
        if (!cbuf || !dbuf) {
                ret = -ENOMEM;
                goto out;
        }
        /* For a compressed extent, we must read the whole on-disk extent */
        for (i = 1; i <= num_copies; i++) {
                read = csize;
                ret = read_extent_data(fs_info, cbuf, disk_bytenr,
                                       &read, i);
                if (ret < 0 || read != csize)
                        continue;
                finished = true;
                break;
        }
        if (!finished) {
                ret = -EIO;
                goto out;
        }

        ret = btrfs_decompress(btrfs_file_extent_compression(leaf, fi), cbuf,
                               csize, dbuf, dsize);
        if (ret != dsize) {
                ret = -EIO;
                goto out;
        }
        /* Then copy the needed part */
        memcpy(dest, dbuf + btrfs_file_extent_offset(leaf, fi), len);
        ret = len;
out:
        free(cbuf);
        free(dbuf);
        return ret;
}

/*
 * Get the first file extent that covers file offset @file_offset.
 *
 * @file_offset must be aligned to sectorsize.
 *
 * Return 0 if found, with @path pointing to the file extent.
 * Return >0 if not found, and fill @next_offset.
 * @next_offset can be 0 if there is no next file extent.
 * Return <0 for error.
 */
static int lookup_data_extent(struct btrfs_root *root, struct btrfs_path *path,
                              u64 ino, u64 file_offset, u64 *next_offset)
{
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        u8 extent_type;
        int ret = 0;

        ASSERT(IS_ALIGNED(file_offset, root->fs_info->sectorsize));
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = file_offset;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        /* Error or we're already at the file extent */
        if (ret <= 0)
                return ret;
        if (ret > 0) {
                /* Check previous file extent */
                ret = btrfs_previous_item(root, path, ino,
                                          BTRFS_EXTENT_DATA_KEY);
                if (ret < 0)
                        return ret;
                if (ret > 0)
                        goto check_next;
        }
        /* Now the key.offset must be smaller than @file_offset */
        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        if (key.objectid != ino ||
            key.type != BTRFS_EXTENT_DATA_KEY)
                goto check_next;

        fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
                            struct btrfs_file_extent_item);
        extent_type = btrfs_file_extent_type(path->nodes[0], fi);
        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                if (file_offset == 0)
                        return 0;
                /* Inline extent should be the only extent, no next extent. */
                *next_offset = 0;
                return 1;
        }

        /* This file extent covers @file_offset */
        if (key.offset <= file_offset && key.offset +
            btrfs_file_extent_num_bytes(path->nodes[0], fi) > file_offset)
                return 0;
check_next:
        ret = btrfs_next_item(root, path);
        if (ret < 0)
                return ret;
        if (ret > 0) {
                *next_offset = 0;
                return 1;
        }

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
                            struct btrfs_file_extent_item);
        /* No next data extent */
        if (key.objectid != ino ||
            key.type != BTRFS_EXTENT_DATA_KEY) {
                *next_offset = 0;
                return 1;
        }
        /* Current file extent already beyond @file_offset */
        if (key.offset > file_offset) {
                *next_offset = key.offset;
                return 1;
        }
        /* This file extent covers @file_offset */
        if (key.offset <= file_offset && key.offset +
            btrfs_file_extent_num_bytes(path->nodes[0], fi) > file_offset)
                return 0;
        /* This file extent ends before @file_offset, check the next one */
        ret = btrfs_next_item(root, path);
        if (ret < 0)
                return ret;
        if (ret > 0) {
                *next_offset = 0;
                return 1;
        }
        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        if (key.type != BTRFS_EXTENT_DATA_KEY || key.objectid != ino) {
                *next_offset = 0;
                return 1;
        }
        *next_offset = key.offset;
        return 1;
}
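
/*
 * Helper for the unaligned head/tail of a read: read the sector that
 * contains file offset @start from the extent at @path/@fi and copy the
 * bytes from @start up to the end of that sector into @dest.
 * @start + @len must not cross the sector boundary.
 *
 * Return @len for success.
 * Return <0 for error.
 */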
static int read_and_truncate_page(struct btrfs_path *path,
                                  struct btrfs_file_extent_item *fi,
                                  int start, int len, char *dest)
{
        struct extent_buffer *leaf = path->nodes[0];
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        u64 aligned_start = round_down(start, fs_info->sectorsize);
        u8 extent_type;
        char *buf;
        int page_off = start - aligned_start;
        int page_len = fs_info->sectorsize - page_off;
        int ret;

        ASSERT(start + len <= aligned_start + fs_info->sectorsize);
        buf = malloc_cache_aligned(fs_info->sectorsize);
        if (!buf)
                return -ENOMEM;

        extent_type = btrfs_file_extent_type(leaf, fi);
        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                ret = btrfs_read_extent_inline(path, fi, buf);
                memcpy(dest, buf + page_off, min(page_len, ret));
                free(buf);
                return len;
        }

        ret = btrfs_read_extent_reg(path, fi,
                                    round_down(start, fs_info->sectorsize),
                                    fs_info->sectorsize, buf);
        if (ret < 0) {
                free(buf);
                return ret;
        }
        memcpy(dest, buf + page_off, page_len);
        free(buf);
        return len;
}
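
/*
 * Read @len bytes of file inode @ino of @root, starting at @file_offset,
 * into @dest. Holes and preallocated ranges read back as zeros.
 *
 * Return the number of bytes read (@len).
 * Return <0 for error.
 */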
int btrfs_file_read(struct btrfs_root *root, u64 ino, u64 file_offset, u64 len,
                    char *dest)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_file_extent_item *fi;
        struct btrfs_path path;
        struct btrfs_key key;
        u64 aligned_start = round_down(file_offset, fs_info->sectorsize);
        u64 aligned_end = round_down(file_offset + len, fs_info->sectorsize);
        u64 next_offset;
        u64 cur = aligned_start;
        int ret = 0;

        btrfs_init_path(&path);

        /* Zero the whole dest first, so we don't need to care about holes */
        memset(dest, 0, len);

        /* Read out the leading unaligned part */
        if (aligned_start != file_offset) {
                ret = lookup_data_extent(root, &path, ino, aligned_start,
                                         &next_offset);
                if (ret < 0)
                        goto out;
                if (ret == 0) {
                        /* Read the unaligned part out */
                        fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
                                            struct btrfs_file_extent_item);
                        ret = read_and_truncate_page(&path, fi, file_offset,
                                        round_up(file_offset, fs_info->sectorsize) -
                                        file_offset, dest);
                        if (ret < 0)
                                goto out;
                        cur += fs_info->sectorsize;
                } else {
                        /* The whole file is a hole */
                        if (!next_offset) {
                                memset(dest, 0, len);
                                return len;
                        }
                        cur = next_offset;
                }
        }

        /* Read the aligned part */
        while (cur < aligned_end) {
                u64 extent_num_bytes;
                u8 type;

                btrfs_release_path(&path);
                ret = lookup_data_extent(root, &path, ino, cur, &next_offset);
                if (ret < 0)
                        goto out;
                if (ret > 0) {
                        /* No next extent, direct exit */
                        if (!next_offset) {
                                ret = 0;
                                goto out;
                        }
                }
                fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
                                    struct btrfs_file_extent_item);
                btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
                type = btrfs_file_extent_type(path.nodes[0], fi);
                if (type == BTRFS_FILE_EXTENT_INLINE) {
                        ret = btrfs_read_extent_inline(&path, fi, dest);
                        goto out;
                }
                /* Skip holes, as we have already zeroed the dest */
                if (type == BTRFS_FILE_EXTENT_PREALLOC ||
                    btrfs_file_extent_disk_bytenr(path.nodes[0], fi) == 0) {
                        cur = key.offset + btrfs_file_extent_num_bytes(
                                        path.nodes[0], fi);
                        continue;
                }

                /* Read the remaining part of the extent */
                extent_num_bytes = btrfs_file_extent_num_bytes(path.nodes[0],
                                                               fi);
                ret = btrfs_read_extent_reg(&path, fi, cur,
                                min(extent_num_bytes, aligned_end - cur),
                                dest + cur - file_offset);
                if (ret < 0)
                        goto out;
                cur += min(extent_num_bytes, aligned_end - cur);
        }

        /* Read the trailing unaligned part */
        if (file_offset + len != aligned_end) {
                btrfs_release_path(&path);
                ret = lookup_data_extent(root, &path, ino, aligned_end,
                                         &next_offset);
                /* <0 is error, >0 means no extent */
                if (ret)
                        goto out;
                fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
                                    struct btrfs_file_extent_item);
                ret = read_and_truncate_page(&path, fi, aligned_end,
                                file_offset + len - aligned_end,
                                dest + aligned_end - file_offset);
        }
out:
        btrfs_release_path(&path);
        if (ret < 0)
                return ret;
        return len;
}