// SPDX-License-Identifier: GPL-2.0
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements garbage collection. The procedure for garbage collection
 * is different depending on whether a LEB is an index LEB (contains index
 * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
 * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
 * nodes to the journal, at which point the garbage-collected LEB is free to be
 * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
 * dirty in the TNC, and after the next commit, the garbage-collected LEB is
 * free to be reused. Garbage collection will cause the number of dirty index
 * nodes to grow, however sufficient space is reserved for the index to ensure
 * the commit will never run out of space.
 *
 * Notes about the dead watermark. In the current UBIFS implementation we
 * assume that LEBs which have less than @c->dead_wm bytes of free + dirty
 * space are full and not worth garbage-collecting. The dead watermark is one
 * min. I/O unit size, or the min. UBIFS node size, whichever is greater.
 * Indeed, the UBIFS garbage collector has to synchronize the GC head's
 * write-buffer before returning, so this is about wasting one min. I/O unit.
 * However, UBIFS GC could actually reclaim even very small pieces of dirty
 * space by garbage collecting enough dirty LEBs, but we do not bother doing
 * this in this implementation.
 *
 * Notes about the dark watermark. The results of GC depend on how big the
 * UBIFS nodes GC deals with are. Large nodes make GC waste more space. Indeed,
 * if GC moves data from LEB A to LEB B and nodes in LEB A are large, GC would
 * have to waste large pieces of free space at the end of LEB B, because nodes
 * from LEB A would not fit. And the worst situation is when all nodes are of
 * maximum size. So the dark watermark is the amount of free + dirty space in a
 * LEB which is guaranteed to be reclaimable. If a LEB has less space, the GC
 * might be unable to reclaim it. So, LEBs with free + dirty greater than the
 * dark watermark are "good" LEBs from GC's point of view. The other LEBs are
 * not so good, and GC takes extra care when moving them.
 */
#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list_sort.h>
#endif
#include "ubifs.h"

#ifndef __UBOOT__
/*
 * GC may need to move more than one LEB to make progress. The below constants
 * define "soft" and "hard" limits on the number of LEBs the garbage collector
 * may move.
 */
#define SOFT_LEBS_LIMIT 4
#define HARD_LEBS_LIMIT 32

/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the next LEB which is reserved in
 * @c->gc_lnum. Returns %0 in case of success, %-EAGAIN if commit is required,
 * and other negative error codes in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'c->gc_lnum'.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

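	/* Add the reserved LEB to the log as the new GC journal head bud */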
	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}

/**
 * data_nodes_cmp - compare 2 data nodes.
 * @priv: UBIFS file-system description object
 * @a: first data node
 * @b: second data node
 *
 * This function compares data nodes @a and @b. Returns %1 if @a has greater
 * inode or block number, and %-1 otherwise.
 */
static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
	ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
	ubifs_assert(sa->type == UBIFS_DATA_NODE);
	ubifs_assert(sb->type == UBIFS_DATA_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

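	/* Order by inode number first, then by block number */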
	if (inuma == inumb) {
		unsigned int blka = key_block(c, &sa->key);
		unsigned int blkb = key_block(c, &sb->key);

		if (blka <= blkb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}

/**
 * nondata_nodes_cmp - compare 2 non-data nodes.
 * @priv: UBIFS file-system description object
 * @a: first node
 * @b: second node
 *
 * This function compares nodes @a and @b. It makes sure that inode nodes go
 * first and are sorted by length in descending order. Directory entry nodes go
 * after inode nodes and are sorted in ascending hash value order.
 */
static int nondata_nodes_cmp(void *priv, struct list_head *a,
			     struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
		     key_type(c, &sb->key) != UBIFS_DATA_KEY);
	ubifs_assert(sa->type != UBIFS_DATA_NODE &&
		     sb->type != UBIFS_DATA_NODE);

	/* Inodes go before directory entries */
	if (sa->type == UBIFS_INO_NODE) {
		if (sb->type == UBIFS_INO_NODE)
			return sb->len - sa->len;
		return -1;
	}
	if (sb->type == UBIFS_INO_NODE)
		return 1;

	ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sa->key) == UBIFS_XENT_KEY);
	ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sb->key) == UBIFS_XENT_KEY);
	ubifs_assert(sa->type == UBIFS_DENT_NODE ||
		     sa->type == UBIFS_XENT_NODE);
	ubifs_assert(sb->type == UBIFS_DENT_NODE ||
		     sb->type == UBIFS_XENT_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

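	/* Order by parent inode number first, then by name hash */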
	if (inuma == inumb) {
		uint32_t hasha = key_hash(c, &sa->key);
		uint32_t hashb = key_hash(c, &sb->key);

		if (hasha <= hashb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}

/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes nodes to sort and contains the result on exit
 * @nondata: contains non-data nodes on exit
 * @min: minimum node size is returned here
 *
 * This function sorts the list of nodes to garbage collect. First of all, it
 * kills obsolete nodes and separates data and non-data nodes to the
 * @sleb->nodes and @nondata lists correspondingly.
 *
 * Data nodes are then sorted in block number order - this is important for
 * bulk-read; data nodes with lower inode number go before data nodes with
 * higher inode number, and data nodes with lower block number go before data
 * nodes with higher block number.
 *
 * Non-data nodes are sorted as follows.
 * o First go inode nodes - they are sorted in descending length order.
 * o Then go directory entry nodes - they are sorted in hash order, which
 *   should supposedly optimize 'readdir()'. Direntry nodes with lower parent
 *   inode number go before direntry nodes with higher parent inode number,
 *   and direntry nodes with lower name hash values go before direntry nodes
 *   with higher name hash values.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Probably truncation node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}

/**
 * move_node - move a node.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 * @snod: the node to move
 * @wbuf: write-buffer to move node to
 *
 * This function moves node @snod to @wbuf, changes TNC correspondingly, and
 * destroys @snod. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		     struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
{
	int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;

	cond_resched();
	err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
	if (err)
		return err;

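	/*
	 * Re-point the TNC entry for this key from the node's old location
	 * to its new position on the GC head.
	 */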
	err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
				snod->offs, new_lnum, new_offs,
				snod->len);
	list_del(&snod->list);
	kfree(snod);
	return err;
}

/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from the data LEB described by @sleb to the
 * GC journal head. This function returns zero in case of success, %-EAGAIN if
 * commit is required, and other negative error codes in case of other
 * failures.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set, because it is the first GC
		 * invocation since mount.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only if this is an inode with
				 * some data. Otherwise stop and switch the GC
				 * head. IOW, we assume that data-less inode
				 * nodes and direntry nodes are roughly of the
				 * same size.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in the LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}

/**
 * gc_sync_wbufs - sync write-buffers for GC.
 * @c: UBIFS file-system description object
 *
 * We must guarantee that obsoleting nodes are on flash. Unfortunately they may
 * be in a write-buffer instead. That is, a node could be written to a
 * write-buffer, obsoleting another node in a LEB that is GC'd. If that LEB is
 * erased before the write-buffer is sync'd and then an unclean unmount occurs,
 * an existing node is lost. To avoid this, we sync all write-buffers.
 *
 * This function returns %0 on success or a negative error code on failure.
 */
static int gc_sync_wbufs(struct ubifs_info *c)
{
	int err, i;

	for (i = 0; i < c->jhead_cnt; i++) {
		if (i == GCHD)
			continue;
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects an LEB and returns one of the @LEB_FREED,
 * @LEB_RETAINED, etc. positive codes in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of failure.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - a free LEB */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * Write buffers must be sync'd before unmapping
			 * freeable LEBs, because one of them may contain data
			 * which obsoletes something in 'lp->lnum'.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * We scan the entire LEB even though we only really need to scan up to
	 * (c->leb_size - lp->free).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
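		/*
		 * Mark each index node in this LEB dirty in the TNC so that
		 * the next commit writes it out to a new location.
		 */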
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it will become usable only after
		 * the commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}

/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if there are free LEBs
 *
 * This function does out-of-place garbage collection. The return codes are:
 * o positive LEB number if the LEB has been freed and may be used;
 * o %-EAGAIN if the caller has to run commit;
 * o %-ENOSPC if GC failed to make any progress;
 * o other negative error codes in case of other errors.
 *
 * The garbage collector writes data to the journal when GC'ing data LEBs, and
 * just marks indexing nodes dirty when GC'ing indexing LEBs. Thus, at some
 * point a commit may be required. But a commit cannot be run from inside GC,
 * because the caller might be holding the commit lock, so %-EAGAIN is returned
 * instead. This error code means that the caller has to run commit, and re-run
 * GC if there is still no free space.
 *
 * There are many reasons why this function may return %-EAGAIN:
 * o the log is full and there is no space to write an LEB reference for
 *   @c->gc_lnum;
 * o the journal is too large and exceeds size limitations;
 * o GC moved indexing LEBs, but they can be used only after the commit;
 * o the shrinker fails to find clean znodes to free and requests the commit;
 * o etc.
 *
 * Note, if the file-system is close to being full, this function may return
 * %-EAGAIN indefinitely, so the caller has to limit the number of
 * re-invocations of the function. E.g., this happens if the limits on the
 * journal size are too tough and GC writes too much to the journal before an
 * LEB is freed. This might also mean that the journal is too large, and the
 * TNC becomes too big, so that the shrinker is constantly called, finds no
 * clean znodes to free, and requests the commit. Well, this may also happen if
 * the journal is all right, but another kernel process consumes too much
 * memory. Anyway, infinite %-EAGAIN may happen, but only in some extreme or
 * misconfigured cases.
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(!wbuf->used);

	for (i = 0; ; i++) {
		int space_before, space_after;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations. Indexing LEBs were
			 * moved and will be available after the commit.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * We've moved too many LEBs and have not made
			 * progress, give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Empty and freeable LEBs can turn up while we waited for
		 * the wbuf lock, or while we have been running GC. In that
		 * case, we should just return one of those instead of
		 * continuing to GC dirty LEBs. Hence we request
		 * 'ubifs_find_dirty_leb()' to return an empty LEB if it can.
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
		       lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
		       min_space);

		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * This is not an error, so we have to return
				 * the LEB to lprops. But if 'ubifs_return_leb()'
				 * fails, its failure code is propagated to the
				 * caller instead of the original '-EAGAIN'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB and it cannot be
			 * immediately used. And instead of requesting the
			 * commit straight away, we try to garbage collect some
			 * more.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress, keep working */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * GC moved an LEB but has not made any progress. This means
		 * that the previous GC head LEB contained too little free
		 * space and the LEB which was GC'ed contained only large
		 * nodes which did not fit that space.
		 *
		 * We can do 2 things:
		 * 1. pick another LEB in the hope it'll contain a small node
		 *    which will fit the space we have at the end of current GC
		 *    head LEB, but there is no guarantee, so we try this out
		 *    unless we have already been working for too long;
		 * 2. request an LEB with more dirty space, which will force
		 *    'ubifs_find_dirty_leb()' to start scanning the lprops
		 *    table, instead of just picking one from the heap
		 *    (previously it already picked the dirtiest LEB).
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

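	/*
	 * Sync the GC head and make sure the reserved GC LEB is left
	 * unmapped, ready for the next GC run.
	 */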
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	ubifs_assert(ret < 0);
	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}

/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * If a LEB has only dirty and free space, then we may safely unmap it and make
 * it free. Note, we cannot do this with indexing LEBs because dirty space may
 * correspond to index nodes that are required for recovery. In that case, the
 * LEB cannot be unmapped until after the next commit.
 *
 * This function returns %0 upon success and a negative error code upon failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs. Note that recovery requires that all
	 * wbufs are sync'd before this, which is done in 'do_commit()'.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/* Don't release the LEB until after the next commit */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}

/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
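	/*
	 * Unmap the index LEBs which were GC'd before this commit;
	 * they may now be safely reused.
	 */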
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
						  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
#endif
/**
 * ubifs_destroy_idx_gc - destroy idx_gc list.
 * @c: UBIFS file-system description object
 *
 * This function destroys the @c->idx_gc list. It is called when unmounting,
 * so locks are not needed.
 */
void ubifs_destroy_idx_gc(struct ubifs_info *c)
{
	while (!list_empty(&c->idx_gc)) {
		struct ubifs_gced_idx_leb *idx_gc;

		idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
				    list);
		c->idx_gc_cnt -= 1;
		list_del(&idx_gc->list);
		kfree(idx_gc);
	}
}
#ifndef __UBOOT__
/**
 * ubifs_get_idx_gc_leb - get a LEB from the GC'd index LEB list.
 * @c: UBIFS file-system description object
 *
 * Called during start commit, so locks are not needed. Returns the LEB number
 * of the first LEB on the @c->idx_gc list, or %-ENOSPC if the list is empty.
 */
int ubifs_get_idx_gc_leb(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	int lnum;

	if (list_empty(&c->idx_gc))
		return -ENOSPC;
	idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
	lnum = idx_gc->lnum;
	/* c->idx_gc_cnt is updated by the caller when lprops are updated */
	list_del(&idx_gc->list);
	kfree(idx_gc);
	return lnum;
}
#endif