/*
 * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
 *
 * The parts taken from the kernel implementation are:
 *
 * Copyright (c) International Business Machines Corp., 2006
 *
 * SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
 */

#include <common.h>
#include <errno.h>
#include <ubispl.h>

#include <linux/crc32.h>

#include "ubispl.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

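/**
 * ubi_io_read - read data from a physical eraseblock via the read callback
 * @ubi: UBI device description object
 * @buf: buffer to read the data into
 * @pnum: physical eraseblock number, relative to the UBI partition start
 * @from: offset within the physical eraseblock
 * @len: number of bytes to read
 *
 * The peb_offset of the partition is added before the read callback is
 * invoked. Returns the result of the read callback.
 */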
static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
		       unsigned long from, unsigned long len)
{
	return ubi->read(pnum + ubi->peb_offset, from, len, buf);
}

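/**
 * ubi_io_is_bad - check whether a physical eraseblock number is out of range
 * @ubi: UBI device description object
 * @peb: physical eraseblock number to check
 *
 * Returns non-zero if @peb is outside of the scanned UBI partition.
 */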
static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
{
	return peb >= ubi->peb_count || peb < 0;
}

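/**
 * ubi_io_read_vid_hdr - read and validate the VID header of a PEB
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to read from
 * @vh: buffer for the volume identifier header
 * @unused: kept to match the kernel function signature
 *
 * Blocks already marked corrupt are not read again and blocks already
 * scanned are not validated twice. Returns 0 on success, UBI_IO_FF for an
 * empty block, UBI_IO_BAD_HDR for a corrupted header or the error returned
 * by the read callback.
 */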
static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
			       struct ubi_vid_hdr *vh, int unused)
{
	u32 magic;
	int res;

	/* No point in rescanning a corrupt block */
	if (test_bit(pnum, ubi->corrupt))
		return UBI_IO_BAD_HDR;
	/*
	 * If the block has been scanned already, no need to rescan
	 */
	if (test_and_set_bit(pnum, ubi->scanned))
		return 0;

	res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));

	/*
	 * Bad block, unrecoverable ECC error, skip the block
	 */
	if (res) {
		ubi_dbg("Skipping bad or unreadable block %d", pnum);
		vh->magic = 0;
		generic_set_bit(pnum, ubi->corrupt);
		return res;
	}

	/* Magic number available? */
	magic = be32_to_cpu(vh->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		generic_set_bit(pnum, ubi->corrupt);
		if (magic == 0xffffffff)
			return UBI_IO_FF;
		ubi_msg("Bad magic in block %d %08x", pnum, magic);
		return UBI_IO_BAD_HDR;
	}

	/* Header CRC correct? */
	if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
	    be32_to_cpu(vh->hdr_crc)) {
		ubi_msg("Bad CRC in block %d", pnum);
		generic_set_bit(pnum, ubi->corrupt);
		return UBI_IO_BAD_HDR;
	}

94 ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));
95
96 return 0;
97}
98
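/**
 * ubi_rescan_fm_vid_hdr - reread a VID header referenced by the fastmap
 * @ubi: UBI device description object
 * @vh: buffer for the volume identifier header
 * @fm_pnum: physical eraseblock number recorded in the fastmap
 * @fm_vol_id: volume id the fastmap expects in this block
 * @fm_lnum: logical eraseblock number the fastmap expects in this block
 *
 * A mismatch between the on-flash header and the fastmap information is
 * only reported via a debug message. Returns the result of
 * ubi_io_read_vid_hdr() or -EINVAL for an invalid PEB number.
 */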
static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
				 struct ubi_vid_hdr *vh,
				 u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
{
	int res;

	if (ubi_io_is_bad(ubi, fm_pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
	if (!res) {
		/* Check volume id, volume type and lnum */
		if (be32_to_cpu(vh->vol_id) == fm_vol_id &&
		    vh->vol_type == UBI_VID_STATIC &&
		    be32_to_cpu(vh->lnum) == fm_lnum)
			return 0;
		ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
			fm_pnum, fm_vol_id, vh->vol_type,
			be32_to_cpu(vh->vol_id),
			fm_lnum, be32_to_cpu(vh->lnum));
	}
	return res;
}

/* Insert the logical block into the volume info */
static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
			      struct ubi_vid_hdr *vh, u32 vol_id,
			      u32 pnum, u32 lnum)
{
	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
	u32 *ltp;

	/*
	 * If the volume is larger than expected, yell and give up :(
	 */
	if (lnum >= UBI_MAX_VOL_LEBS) {
		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
		return -EINVAL;
	}

	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
		!!test_bit(pnum, ubi->scanned));

	/* Points to the translation entry */
	ltp = vi->lebs_to_pebs + lnum;

	/* If the block is already assigned, check sqnum */
	if (__test_and_set_bit(lnum, vi->found)) {
		u32 cur_pnum = *ltp;
		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;

		/*
		 * If the current block has not yet been scanned, we
		 * need to do that. The other block might be stale or
		 * the current block corrupted and the FM not yet
		 * updated.
		 */
		if (!test_bit(cur_pnum, ubi->scanned)) {
			/*
			 * If the scan fails, we use the valid block
			 */
			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
						  lnum)) {
				*ltp = pnum;
				return 0;
			}
		}

		/*
		 * Should not happen ....
		 */
		if (test_bit(cur_pnum, ubi->corrupt)) {
			*ltp = pnum;
			return 0;
		}

		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
			be64_to_cpu(vh->sqnum));

		/*
		 * Compare sqnum and take the newer one
		 */
		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
			*ltp = pnum;
	} else {
		*ltp = pnum;
		if (lnum > vi->last_block)
			vi->last_block = lnum;
	}

	return 0;
}

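/**
 * ubi_scan_vid_hdr - scan a PEB and record it in the volume info
 * @ubi: UBI device description object
 * @vh: buffer for the volume identifier header of @pnum
 * @pnum: physical eraseblock number to scan
 *
 * Returns UBI_FASTMAP_ANCHOR if the block belongs to the fastmap anchor
 * volume and fastmap is enabled, 0 if the block is not of interest or was
 * added successfully, otherwise an error code from ubi_io_read_vid_hdr()
 * or ubi_add_peb_to_vol().
 */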
static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
			    u32 pnum)
{
	u32 vol_id, lnum;
	int res;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
	if (res)
		return res;

	/* Get volume id */
	vol_id = be32_to_cpu(vh->vol_id);

	/* If this is the fastmap anchor, return right away */
	if (vol_id == UBI_FM_SB_VOLUME_ID)
		return ubi->fm_enabled ? UBI_FASTMAP_ANCHOR : 0;

	/* We only care about static volumes with an id < UBI_SPL_VOL_IDS */
	if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC)
		return 0;

	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	lnum = be32_to_cpu(vh->lnum);
	return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum);
}

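/**
 * assign_aeb_to_av - process a PEB which the fastmap assigns to a volume
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number
 * @lnum: logical eraseblock number according to the fastmap
 * @vol_id: volume id the block belongs to
 * @vol_type: volume type taken from the fastmap volume header
 * @used: number of used logical eraseblocks in the volume
 *
 * Accounts the block in the fastmap statistics and, for static volumes
 * which are marked to be loaded, verifies the on-flash VID header via
 * ubi_scan_vid_hdr().
 */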
static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
			    u32 vol_id, u32 vol_type, u32 used)
{
	struct ubi_vid_hdr *vh;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	ubi->fastmap_pebs++;

	if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME)
		return 0;

	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	vh = ubi->blockinfo + pnum;

	return ubi_scan_vid_hdr(ubi, vh, pnum);
}

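/**
 * scan_pool - scan all PEBs of a fastmap pool
 * @ubi: UBI device description object
 * @pebs: array of physical eraseblock numbers as stored in the fastmap
 * @pool_size: number of entries in @pebs
 *
 * Scan failures on individual blocks are tolerated, only an out of range
 * PEB number invalidates the whole fastmap. Returns 0 on success or
 * UBI_BAD_FASTMAP.
 */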
static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
{
	struct ubi_vid_hdr *vh;
	u32 pnum;
	int i;

	ubi_dbg("Scanning pool size: %d", pool_size);

	for (i = 0; i < pool_size; i++) {
		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
			return UBI_BAD_FASTMAP;
		}

		vh = ubi->blockinfo + pnum;
		/*
		 * We allow the scan to fail here. The loader will notice
		 * and look for a replacement.
		 */
		ubi_scan_vid_hdr(ubi, vh, pnum);
	}
	return 0;
}

/*
 * Fastmap code is stolen from Linux kernel and this stub structure is used
 * to make it happy.
 */
struct ubi_attach_info {
	int i;
};

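/**
 * ubi_attach_fastmap - process the in-memory fastmap and fill the volume info
 * @ubi: UBI device description object
 * @ai: stub attach info, unused in the SPL implementation
 * @fm: fastmap layout to be filled with the pool sizes
 *
 * Walks the fastmap data in ubi->fm_buf: header, the two pools, the erase
 * counter lists and the per volume EBA tables, and finally scans both
 * pools. Returns 0 on success or UBI_BAD_FASTMAP if a consistency check
 * fails.
 */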
static int ubi_attach_fastmap(struct ubi_scan_info *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	void *fm_raw = ubi->fm_buf;

	memset(ubi->fm_used, 0, sizeof(ubi->fm_used));

	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		u32 vol_id, vol_type, used, reserved;

		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		vol_id = be32_to_cpu(fmvhdr->vol_id);
		vol_type = fmvhdr->vol_type;
		used = be32_to_cpu(fmvhdr->used_ebs);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		reserved = be32_to_cpu(fm_eba->reserved_pebs);
		ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved);
		for (j = 0; j < reserved; j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			if (!__test_and_clear_bit(pnum, ubi->fm_used))
				continue;

			/*
			 * We only handle static volumes so used_ebs
			 * needs to be handed in. And we do not assign
			 * the reserved blocks
			 */
			if (j >= used)
				continue;

			ret = assign_aeb_to_av(ubi, pnum, j, vol_id,
					       vol_type, used);
			if (!ret)
				continue;

			/*
			 * Nasty: The fastmap claims that the volume
			 * has one more block than it actually has, but
			 * that block is always empty and the other
			 * blocks have the correct number of total LEBs
			 * in their headers. Deal with it.
			 */
			if (ret != UBI_IO_FF && j != used - 1)
				goto fail_bad;
			ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d",
				vol_id, j, used);
		}
	}

	ret = scan_pool(ubi, fmpl1->pebs, pool_size);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size);
	if (ret)
		goto fail;

#ifdef CHECKME
	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;
#endif

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	return ret;
}

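/**
 * ubi_scan_fastmap - read and verify the fastmap starting at the anchor PEB
 * @ubi: UBI device description object
 * @ai: stub attach info, unused in the SPL implementation
 * @fm_anchor: physical eraseblock number of the fastmap anchor
 *
 * Reads the fastmap super block and all fastmap data blocks into
 * ubi->fm_buf, checks the overall data CRC and hands the buffer to
 * ubi_attach_fastmap(). Returns 0 on success, UBI_BAD_FASTMAP or a read
 * error otherwise.
 */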
static int ubi_scan_fastmap(struct ubi_scan_info *ubi,
			    struct ubi_attach_info *ai,
			    int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fmsb = &ubi->fm_sb;
	fm = &ubi->fm_layout;

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err("bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
			ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

542 vh = &ubi->fm_vh;
543
544 for (i = 0; i < used_blocks; i++) {
545 pnum = be32_to_cpu(fmsb->block_loc[i]);
546
547 if (ubi_io_is_bad(ubi, pnum)) {
548 ret = UBI_BAD_FASTMAP;
549 goto free_hdr;
550 }
551
552#ifdef LATER
553 int image_seq;
554 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
555 if (ret && ret != UBI_IO_BITFLIPS) {
556 ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
557 i, pnum);
558 if (ret > 0)
559 ret = UBI_BAD_FASTMAP;
560 goto free_hdr;
561 } else if (ret == UBI_IO_BITFLIPS)
562 fm->to_be_tortured[i] = 1;
563
564 image_seq = be32_to_cpu(ech->image_seq);
565 if (!ubi->image_seq)
566 ubi->image_seq = image_seq;
567 /*
568 * Older UBI implementations have image_seq set to zero, so
569 * we shouldn't fail if image_seq == 0.
570 */
571 if (image_seq && (image_seq != ubi->image_seq)) {
572 ubi_err("wrong image seq:%d instead of %d",
573 be32_to_cpu(ech->image_seq), ubi->image_seq);
574 ret = UBI_BAD_FASTMAP;
575 goto free_hdr;
576 }
577#endif
578 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
579 if (ret && ret != UBI_IO_BITFLIPS) {
580 ubi_err("unable to read fastmap block# %i (PEB: %i)",
581 i, pnum);
582 goto free_hdr;
583 }
584
		/*
		 * Mainline code rescans the anchor header. We've done
		 * that already so we merely copy it over.
		 */
		if (pnum == fm_anchor)
			memcpy(vh, ubi->blockinfo + pnum, sizeof(*vh));

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err("bad fastmap anchor vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err("bad fastmap data vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
				"err: %i)", i, pnum, ret);
			goto free_hdr;
		}
	}

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err("fastmap data CRC is invalid");
		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg("attached by fastmap %uMB %u blocks",
		ubi->fsize_mb, ubi->peb_count);
	ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);

out:
	if (ret)
		ubi_err("Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
free_fm_sb:
	goto out;
}

/*
 * Scan the flash and attempt to attach via fastmap
 */
static void ipl_scan(struct ubi_scan_info *ubi)
{
	unsigned int pnum;
	int res;

	/*
	 * Scan first for the fastmap super block
	 */
	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
		/*
		 * We ignore errors here as we are merely scanning
		 * the headers.
		 */
		if (res != UBI_FASTMAP_ANCHOR)
			continue;

		/*
		 * If fastmap is disabled, continue scanning. This
		 * might happen because the previous attempt failed or
		 * the caller disabled it right away.
		 */
		if (!ubi->fm_enabled)
			continue;

		/*
		 * Try to attach the fastmap, if that fails continue
		 * scanning.
		 */
		if (!ubi_scan_fastmap(ubi, NULL, pnum))
			return;
		/*
		 * Fastmap failed. Clear everything we have and start
		 * over. We are paranoid and do not trust anything.
		 */
		memset(ubi->volinfo, 0, sizeof(ubi->volinfo));
		pnum = 0;
		break;
	}

	/*
	 * Continue scanning, ignore errors, we might find what we are
	 * looking for.
	 */
	for (; pnum < ubi->peb_count; pnum++)
		ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
}

/*
 * Load a logical block of a volume into memory
 */
static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr,
			  struct ubi_vol_info *vi, u32 vol_id, u32 lnum,
			  u32 last)
{
	struct ubi_vid_hdr *vh, *vrepl;
	u32 pnum, crc, dlen;

retry:
	/*
	 * If this is a fastmap run, we try a full rescan, otherwise
	 * we simply give up.
	 */
	if (!test_bit(lnum, vi->found)) {
		ubi_warn("LEB %d of %d is missing", lnum, last);
		return -EINVAL;
	}

	pnum = vi->lebs_to_pebs[lnum];

	ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum);

	if (ubi_io_is_bad(ubi, pnum)) {
		ubi_warn("Corrupted mapping block %d PB %d\n", lnum, pnum);
		return -EINVAL;
	}

	if (test_bit(pnum, ubi->corrupt))
		goto find_other;

	/*
	 * Let's try to read that block
	 */
	vh = ubi->blockinfo + pnum;

	if (!test_bit(pnum, ubi->scanned)) {
		ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id,
			 lnum, pnum);
		if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum))
			goto find_other;
	}

	/*
	 * Check if the total number of blocks is correct
	 */
	if (be32_to_cpu(vh->used_ebs) != last) {
		ubi_dbg("Block count mismatch.");
		ubi_dbg("vh->used_ebs: %d nrblocks: %d",
			be32_to_cpu(vh->used_ebs), last);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/*
	 * Get the data length of this block.
	 */
	dlen = be32_to_cpu(vh->data_size);

	/*
	 * Read the data into RAM. We ignore the return value
	 * here as the only things which might go wrong are
	 * bitflips. Try nevertheless.
	 */
	ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen);

	/* Calculate CRC over the data */
	crc = crc32(UBI_CRC32_INIT, laddr, dlen);

	if (crc != be32_to_cpu(vh->data_crc)) {
		ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id,
			 lnum, pnum);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/* We are good. Return the data length we read */
	return dlen;

find_other:
	ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum);
	generic_clear_bit(lnum, vi->found);
	vrepl = NULL;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum;
		u32 t_vol_id = be32_to_cpu(tmp->vol_id);
		u32 t_lnum = be32_to_cpu(tmp->lnum);

		if (test_bit(pnum, ubi->corrupt))
			continue;

		if (t_vol_id != vol_id || t_lnum != lnum)
			continue;

		if (!test_bit(pnum, ubi->scanned)) {
			ubi_warn("Vol: %u LEB %u PEB %u not yet scanned",
				 vol_id, lnum, pnum);
			if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum))
				continue;
		}

		/*
		 * We found one. If it's the first, assign it; otherwise
		 * compare the sqnum
		 */
		generic_set_bit(lnum, vi->found);

		if (!vrepl) {
			vrepl = tmp;
			continue;
		}

		if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum))
			vrepl = tmp;
	}

	if (vrepl) {
		/* Update the vi table */
		pnum = vrepl - ubi->blockinfo;
		vi->lebs_to_pebs[lnum] = pnum;
		ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum);
		vh = vrepl;
	}
	goto retry;
}

/*
 * Load a volume into RAM
 */
static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr)
{
	struct ubi_vol_info *vi;
	u32 lnum, last, len;

	if (vol_id >= UBI_SPL_VOL_IDS)
		return -EINVAL;

	len = 0;
	vi = ubi->volinfo + vol_id;
	last = vi->last_block + 1;

	/* Read the blocks to RAM, check CRC */
	for (lnum = 0; lnum < last; lnum++) {
		int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last);

		if (res < 0) {
			ubi_warn("Failed to load volume %u", vol_id);
			return res;
		}
		/* res is the data length of the read block */
		laddr += res;
		len += res;
	}
	return len;
}

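/**
 * ubispl_load_volumes - scan the UBI partition and load the requested volumes
 * @info: flash geometry, read callback and scan buffer supplied by the caller
 * @lvols: array describing the volume ids to load and their load addresses
 * @nrvols: number of entries in @lvols
 *
 * Tries a fastmap assisted scan first (if enabled by the caller) and falls
 * back to a full scan with fastmap disabled when loading fails. Returns 0
 * on success or a negative error code.
 */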
int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
			int nrvols)
{
	struct ubi_scan_info *ubi = info->ubi;
	int res, i, fastmap = info->fastmap;
	u32 fsize;

retry:
	/*
	 * We do a partial initialization of @ubi. Cleaning fm_buf is
	 * not necessary.
	 */
	memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));

	ubi->read = info->read;

	/* Precalculate the offsets */
	ubi->vid_offset = info->vid_offset;
	ubi->leb_start = info->leb_start;
	ubi->leb_size = info->peb_size - ubi->leb_start;
	ubi->peb_count = info->peb_count;
	ubi->peb_offset = info->peb_offset;

	fsize = info->peb_size * info->peb_count;
	ubi->fsize_mb = fsize >> 20;

	/* Fastmap init */
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_enabled = fastmap;

	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		generic_set_bit(lv->vol_id, ubi->toload);
	}

	ipl_scan(ubi);

	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		ubi_msg("Loading VolId #%d", lv->vol_id);
		res = ipl_load(ubi, lv->vol_id, lv->load_addr);
		if (res < 0) {
			if (fastmap) {
				fastmap = 0;
				goto retry;
			}
			ubi_warn("Failed");
			return res;
		}
	}
	return 0;
}