/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#ifndef __UBOOT__
#include <linux/crc32.h>
#else
#include <div64.h>
#include <malloc.h>
#include <ubi_uboot.h>
#endif

#include <linux/compat.h>
#include <linux/math64.h>
#include "ubi.h"

/**
 * init_seen - allocate memory for the seen logic, used for debugging.
 * @ubi: UBI device description object
 */
static inline int *init_seen(struct ubi_device *ubi)
{
	int *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(int *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	seen[pnum] = 1;
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, int *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}
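
/*
 * A minimal usage sketch for the seen helpers above, mirroring how
 * ubi_write_fastmap() below chains them (illustration only, not an
 * additional API):
 *
 *	int *seen = init_seen(ubi);
 *
 *	if (IS_ERR(seen))
 *		return PTR_ERR(seen);
 *	set_seen(ubi, pnum, seen);         (once per PEB put into the fastmap)
 *	err = self_check_seen(ubi, seen);  (complains about unseen PEBs)
 *	free_seen(seen);
 *
 * With fastmap debug checks disabled, init_seen() returns NULL and the
 * other helpers degrade to no-ops, so the chain is safe either way.
 */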

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}
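
/*
 * Worked example with a hypothetical geometry (the numbers are
 * illustrative, not taken from a real board): assuming leb_size = 126976
 * (a 128 KiB PEB minus two 2 KiB NAND pages) and peb_count = 1024, the
 * sum above is dominated by the per-PEB entries,
 * 1024 * (sizeof(struct ubi_fm_ec) + sizeof(__be32)), plus the fixed
 * headers and UBI_MAX_VOLUMES volume headers.  roundup() then pads the
 * total to whole LEBs, so the fastmap always occupies complete LEBs on
 * flash (fm_size / leb_size blocks, cf. used_blocks below).
 */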

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an out of memory condition,
 * ERR_PTR(-EINVAL) if the volume already exists.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else if (vol_id < av->vol_id)
			p = &(*p)->rb_right;
		else
			return ERR_PTR(-EINVAL);
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = av->used_ebs = 0;
	av->vol_id = vol_id;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}
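
/*
 * Note on the tree walk above: this is the stock kernel rbtree insertion
 * idiom - descend from the root remembering the link slot (p) and its
 * parent, bail out if the key is already present, then splice the new
 * node with rb_link_node() and rebalance with rb_insert_color().  The
 * same walk, keyed on the LEB or volume number, reappears in
 * assign_aeb_to_av(), update_vol() and process_pool_aeb() below.
 */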

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
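
/*
 * A note on cmp_res above: ubi_compare_lebs() reports its verdict as a
 * bitmask, and the only bit update_vol() inspects is bit 0, "the PEB
 * passed by number (new_aeb->pnum) is newer than the one in @aeb".
 * When the pool copy wins, the stale PEB is not simply forgotten: a
 * "victim" entry carrying its erase counter is queued on ai->erase so
 * the counter survives until the block is recycled.
 */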

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
#ifndef __UBOOT__
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
#else
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
#endif
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}

	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}
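
/*
 * To summarize scan_pool(): every pool PEB ends up in exactly one of
 * three states.  An all-0xFF VID header (UBI_IO_FF*) means the block was
 * unmapped after the fastmap had been written, so it is dropped from the
 * EBA via unmap_peb() and queued on @free; a readable VID header means
 * the block carries data newer than the fastmap and is merged through
 * process_pool_aeb(); anything else is treated as damage and makes the
 * caller fall back to a full scan (UBI_BAD_FASTMAP).
 */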

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;
		if (PTR_ERR(av) == -EINVAL) {
			ubi_err(ubi, "volume (ID %i) already exists",
				be32_to_cpu(fmvhdr->vol_id));
			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
#ifndef __UBOOT__
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;
#else
	if (count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks) {
		WARN_ON(1);
		goto fail_bad;
	}
#endif

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}
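
/*
 * On-flash layout consumed above, in this exact order (all multi-byte
 * fields big endian):
 *
 *	struct ubi_fm_sb		super block, data CRC, # of blocks
 *	struct ubi_fm_hdr		counts for the lists below
 *	struct ubi_fm_scan_pool		pool
 *	struct ubi_fm_scan_pool		WL pool
 *	struct ubi_fm_ec[]		free PEBs
 *	struct ubi_fm_ec[]		used PEBs
 *	struct ubi_fm_ec[]		to-be-scrubbed PEBs
 *	struct ubi_fm_ec[]		to-be-erased PEBs
 *	per volume:
 *		struct ubi_fm_volhdr
 *		struct ubi_fm_eba, then __be32 pnum[reserved_pebs]
 *
 * ubi_write_fastmap() below serializes the very same sequence, so the
 * two functions must be kept in sync.
 */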

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}
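
/*
 * Note the error-handling convention in ubi_scan_fastmap(): every
 * consistency check (magic numbers, format version, block count, size,
 * image sequence numbers, anchor/data vol_ids, data CRC) deliberately
 * resolves to UBI_BAD_FASTMAP, which is not fatal - the caller simply
 * falls back to a full media scan.  Only real I/O failures and -ENOMEM
 * are propagated as negative errors and abort the attach entirely.
 */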

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	int *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}
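
/*
 * Locking in ubi_write_fastmap(): the whole in-memory state (pools, WL
 * trees, pending erase work, volume EBA tables) is serialized into
 * ubi->fm_buf under volumes_lock + wl_lock, and the flash I/O happens
 * only after both spinlocks are dropped.  The caller,
 * ubi_update_fastmap(), additionally holds fm_protect, work_sem and
 * fm_eba_sem around this function, so the snapshot cannot race with
 * EBA updates.
 */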

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}
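
/*
 * The "fake fastmap" written above consists of nothing but an anchor
 * VID header for UBI_FM_SB_VOLUME_ID with a fresh sequence number and,
 * presumably, no valid fastmap payload behind it.  The next attach will
 * find the anchor, fail to parse a usable fastmap and fall back to
 * scanning - which is exactly what invalidation is supposed to achieve.
 */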

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}
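
/*
 * ubi_update_fastmap() in short: refill the pools, reserve
 * new_fm->used_blocks PEBs (reusing and re-erasing the old fastmap's
 * PEBs whenever the WL sub-system cannot hand out fresh ones), then
 * write the new fastmap under work_sem + fm_eba_sem.  If the write
 * fails, the old map is invalidated rather than left half-valid; only
 * if even the invalidation fails does UBI drop to read-only mode.
 */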