ubi,ubifs: sync with linux v4.2

sync with linux v4.2

commit 64291f7db5bd8150a74ad2036f1037e6a0428df2
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Sun Aug 30 11:34:09 2015 -0700

    Linux 4.2

This update is needed because, as it turned out, fastmap
was in an experimental/broken state in kernel v3.15, which
was the last kernel version the U-Boot UBI code was based on.

Signed-off-by: Heiko Schocher <hs@denx.de>
Tested-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 1bdbfa7..90fb74a 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -172,6 +172,7 @@
 
 /**
  * validate_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
  * @vid_hdr: the volume identifier header to check
  * @av: information about the volume this logical eraseblock belongs to
  * @pnum: physical eraseblock number the VID header came from
@@ -184,7 +185,8 @@
  * information in the VID header is consistent to the information in other VID
  * headers of the same volume.
  */
-static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
+static int validate_vid_hdr(const struct ubi_device *ubi,
+			    const struct ubi_vid_hdr *vid_hdr,
 			    const struct ubi_ainf_volume *av, int pnum)
 {
 	int vol_type = vid_hdr->vol_type;
@@ -202,7 +204,7 @@
 		 */
 
 		if (vol_id != av->vol_id) {
-			ubi_err("inconsistent vol_id");
+			ubi_err(ubi, "inconsistent vol_id");
 			goto bad;
 		}
 
@@ -212,17 +214,17 @@
 			av_vol_type = UBI_VID_DYNAMIC;
 
 		if (vol_type != av_vol_type) {
-			ubi_err("inconsistent vol_type");
+			ubi_err(ubi, "inconsistent vol_type");
 			goto bad;
 		}
 
 		if (used_ebs != av->used_ebs) {
-			ubi_err("inconsistent used_ebs");
+			ubi_err(ubi, "inconsistent used_ebs");
 			goto bad;
 		}
 
 		if (data_pad != av->data_pad) {
-			ubi_err("inconsistent data_pad");
+			ubi_err(ubi, "inconsistent data_pad");
 			goto bad;
 		}
 	}
@@ -230,7 +232,7 @@
 	return 0;
 
 bad:
-	ubi_err("inconsistent VID header at PEB %d", pnum);
+	ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
 	ubi_dump_vid_hdr(vid_hdr);
 	ubi_dump_av(av);
 	return -EINVAL;
@@ -332,7 +334,7 @@
 		 * support these images anymore. Well, those images still work,
 		 * but only if no unclean reboots happened.
 		 */
-		ubi_err("unsupported on-flash UBI format");
+		ubi_err(ubi, "unsupported on-flash UBI format");
 		return -EINVAL;
 	}
 
@@ -373,7 +375,7 @@
 			if (err == UBI_IO_BITFLIPS)
 				bitflips = 1;
 			else {
-				ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+				ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
 					pnum, err);
 				if (err > 0)
 					err = -EIO;
@@ -404,7 +406,7 @@
 		second_is_newer = !second_is_newer;
 	} else {
 		dbg_bld("PEB %d CRC is OK", pnum);
-		bitflips = !!err;
+		bitflips |= !!err;
 	}
 	mutex_unlock(&ubi->buf_mutex);
 
@@ -503,7 +505,7 @@
 		 * logical eraseblocks because there was an unclean reboot.
 		 */
 		if (aeb->sqnum == sqnum && sqnum != 0) {
-			ubi_err("two LEBs with same sequence number %llu",
+			ubi_err(ubi, "two LEBs with same sequence number %llu",
 				sqnum);
 			ubi_dump_aeb(aeb, 0);
 			ubi_dump_vid_hdr(vid_hdr);
@@ -523,7 +525,7 @@
 			 * This logical eraseblock is newer than the one
 			 * found earlier.
 			 */
-			err = validate_vid_hdr(vid_hdr, av, pnum);
+			err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
 			if (err)
 				return err;
 
@@ -561,7 +563,7 @@
 	 * attaching information.
 	 */
 
-	err = validate_vid_hdr(vid_hdr, av, pnum);
+	err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
 	if (err)
 		return err;
 
@@ -664,7 +666,8 @@
 		 * Erase counter overflow. Upgrade UBI and use 64-bit
 		 * erase counters internally.
 		 */
-		ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
+		ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
+			pnum, ec);
 		return -EINVAL;
 	}
 
@@ -732,7 +735,7 @@
 		return aeb;
 	}
 
-	ubi_err("no free eraseblocks");
+	ubi_err(ubi, "no free eraseblocks");
 	return ERR_PTR(-ENOSPC);
 }
 
@@ -781,9 +784,9 @@
 	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
 		goto out_unlock;
 
-	ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+	ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
 		pnum);
-	ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+	ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
 	ubi_dump_vid_hdr(vid_hdr);
 	pr_err("hexdump of PEB %d offset %d, length %d",
 	       pnum, ubi->leb_start, ubi->leb_size);
@@ -855,7 +858,8 @@
 		bitflips = 1;
 		break;
 	default:
-		ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
+		ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
+			err);
 		return -EINVAL;
 	}
 
@@ -864,7 +868,7 @@
 
 		/* Make sure UBI version is OK */
 		if (ech->version != UBI_VERSION) {
-			ubi_err("this UBI version is %d, image version is %d",
+			ubi_err(ubi, "this UBI version is %d, image version is %d",
 				UBI_VERSION, (int)ech->version);
 			return -EINVAL;
 		}
@@ -878,7 +882,7 @@
 			 * flash. Upgrade UBI and use 64-bit erase counters
 			 * internally.
 			 */
-			ubi_err("erase counter overflow, max is %d",
+			ubi_err(ubi, "erase counter overflow, max is %d",
 				UBI_MAX_ERASECOUNTER);
 			ubi_dump_ec_hdr(ech);
 			return -EINVAL;
@@ -899,7 +903,7 @@
 		if (!ubi->image_seq)
 			ubi->image_seq = image_seq;
 		if (image_seq && ubi->image_seq != image_seq) {
-			ubi_err("bad image sequence number %d in PEB %d, expected %d",
+			ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
 				image_seq, pnum, ubi->image_seq);
 			ubi_dump_ec_hdr(ech);
 			return -EINVAL;
@@ -977,7 +981,7 @@
 			return err;
 		goto adjust_mean_ec;
 	default:
-		ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
+		ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
 			err);
 		return -EINVAL;
 	}
@@ -995,7 +999,7 @@
 		case UBI_COMPAT_DELETE:
 			if (vol_id != UBI_FM_SB_VOLUME_ID
 			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
-				ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+				ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
 					vol_id, lnum);
 			}
 			err = add_to_list(ai, pnum, vol_id, lnum,
@@ -1005,13 +1009,13 @@
 			return 0;
 
 		case UBI_COMPAT_RO:
-			ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
+			ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
 				vol_id, lnum);
 			ubi->ro_mode = 1;
 			break;
 
 		case UBI_COMPAT_PRESERVE:
-			ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+			ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
 				vol_id, lnum);
 			err = add_to_list(ai, pnum, vol_id, lnum,
 					  ec, 0, &ai->alien);
@@ -1020,14 +1024,14 @@
 			return 0;
 
 		case UBI_COMPAT_REJECT:
-			ubi_err("incompatible internal volume %d:%d found",
+			ubi_err(ubi, "incompatible internal volume %d:%d found",
 				vol_id, lnum);
 			return -EINVAL;
 		}
 	}
 
 	if (ec_err)
-		ubi_warn("valid VID header but corrupted EC header at PEB %d",
+		ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
 			 pnum);
 	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
 	if (err)
@@ -1071,7 +1075,7 @@
 	 * with the flash HW or driver.
 	 */
 	if (ai->corr_peb_count) {
-		ubi_err("%d PEBs are corrupted and preserved",
+		ubi_err(ubi, "%d PEBs are corrupted and preserved",
 			ai->corr_peb_count);
 		pr_err("Corrupted PEBs are:");
 		list_for_each_entry(aeb, &ai->corr, u.list)
@@ -1083,7 +1087,7 @@
 		 * otherwise, only print a warning.
 		 */
 		if (ai->corr_peb_count >= max_corr) {
-			ubi_err("too many corrupted PEBs, refusing");
+			ubi_err(ubi, "too many corrupted PEBs, refusing");
 			return -EINVAL;
 		}
 	}
@@ -1106,11 +1110,11 @@
 		 */
 		if (ai->maybe_bad_peb_count <= 2) {
 			ai->is_empty = 1;
-			ubi_msg("empty MTD device detected");
+			ubi_msg(ubi, "empty MTD device detected");
 			get_random_bytes(&ubi->image_seq,
 					 sizeof(ubi->image_seq));
 		} else {
-			ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+			ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
 			return -EINVAL;
 		}
 
@@ -1244,7 +1248,7 @@
 			goto out_vidh;
 	}
 
-	ubi_msg("scanning is finished");
+	ubi_msg(ubi, "scanning is finished");
 
 	/* Calculate mean erase counter */
 	if (ai->ec_count)
@@ -1293,6 +1297,30 @@
 	return err;
 }
 
+static struct ubi_attach_info *alloc_ai(void)
+{
+	struct ubi_attach_info *ai;
+
+	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+	if (!ai)
+		return ai;
+
+	INIT_LIST_HEAD(&ai->corr);
+	INIT_LIST_HEAD(&ai->free);
+	INIT_LIST_HEAD(&ai->erase);
+	INIT_LIST_HEAD(&ai->alien);
+	ai->volumes = RB_ROOT;
+	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+					       sizeof(struct ubi_ainf_peb),
+					       0, 0, NULL);
+	if (!ai->aeb_slab_cache) {
+		kfree(ai);
+		ai = NULL;
+	}
+
+	return ai;
+}
+
 #ifdef CONFIG_MTD_UBI_FASTMAP
 
 /**
@@ -1305,7 +1333,7 @@
  * UBI_NO_FASTMAP denotes that no fastmap was found.
  * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
  */
-static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
 {
 	int err, pnum, fm_anchor = -1;
 	unsigned long long max_sqnum = 0;
@@ -1326,7 +1354,7 @@
 		cond_resched();
 
 		dbg_gen("process PEB %d", pnum);
-		err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+		err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
 		if (err < 0)
 			goto out_vidh;
 
@@ -1342,7 +1370,12 @@
 	if (fm_anchor < 0)
 		return UBI_NO_FASTMAP;
 
-	return ubi_scan_fastmap(ubi, ai, fm_anchor);
+	destroy_ai(*ai);
+	*ai = alloc_ai();
+	if (!*ai)
+		return -ENOMEM;
+
+	return ubi_scan_fastmap(ubi, *ai, fm_anchor);
 
 out_vidh:
 	ubi_free_vid_hdr(ubi, vidh);
@@ -1354,30 +1387,6 @@
 
 #endif
 
-static struct ubi_attach_info *alloc_ai(const char *slab_name)
-{
-	struct ubi_attach_info *ai;
-
-	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
-	if (!ai)
-		return ai;
-
-	INIT_LIST_HEAD(&ai->corr);
-	INIT_LIST_HEAD(&ai->free);
-	INIT_LIST_HEAD(&ai->erase);
-	INIT_LIST_HEAD(&ai->alien);
-	ai->volumes = RB_ROOT;
-	ai->aeb_slab_cache = kmem_cache_create(slab_name,
-					       sizeof(struct ubi_ainf_peb),
-					       0, 0, NULL);
-	if (!ai->aeb_slab_cache) {
-		kfree(ai);
-		ai = NULL;
-	}
-
-	return ai;
-}
-
 /**
  * ubi_attach - attach an MTD device.
  * @ubi: UBI device descriptor
@@ -1391,7 +1400,7 @@
 	int err;
 	struct ubi_attach_info *ai;
 
-	ai = alloc_ai("ubi_aeb_slab_cache");
+	ai = alloc_ai();
 	if (!ai)
 		return -ENOMEM;
 
@@ -1405,11 +1414,11 @@
 	if (force_scan)
 		err = scan_all(ubi, ai, 0);
 	else {
-		err = scan_fast(ubi, ai);
-		if (err > 0) {
+		err = scan_fast(ubi, &ai);
+		if (err > 0 || mtd_is_eccerr(err)) {
 			if (err != UBI_NO_FASTMAP) {
 				destroy_ai(ai);
-				ai = alloc_ai("ubi_aeb_slab_cache2");
+				ai = alloc_ai();
 				if (!ai)
 					return -ENOMEM;
 
@@ -1445,10 +1454,10 @@
 		goto out_wl;
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
-	if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
 		struct ubi_attach_info *scan_ai;
 
-		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+		scan_ai = alloc_ai();
 		if (!scan_ai) {
 			err = -ENOMEM;
 			goto out_wl;
@@ -1511,37 +1520,37 @@
 		vols_found += 1;
 
 		if (ai->is_empty) {
-			ubi_err("bad is_empty flag");
+			ubi_err(ubi, "bad is_empty flag");
 			goto bad_av;
 		}
 
 		if (av->vol_id < 0 || av->highest_lnum < 0 ||
 		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
 		    av->data_pad < 0 || av->last_data_size < 0) {
-			ubi_err("negative values");
+			ubi_err(ubi, "negative values");
 			goto bad_av;
 		}
 
 		if (av->vol_id >= UBI_MAX_VOLUMES &&
 		    av->vol_id < UBI_INTERNAL_VOL_START) {
-			ubi_err("bad vol_id");
+			ubi_err(ubi, "bad vol_id");
 			goto bad_av;
 		}
 
 		if (av->vol_id > ai->highest_vol_id) {
-			ubi_err("highest_vol_id is %d, but vol_id %d is there",
+			ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
 				ai->highest_vol_id, av->vol_id);
 			goto out;
 		}
 
 		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
 		    av->vol_type != UBI_STATIC_VOLUME) {
-			ubi_err("bad vol_type");
+			ubi_err(ubi, "bad vol_type");
 			goto bad_av;
 		}
 
 		if (av->data_pad > ubi->leb_size / 2) {
-			ubi_err("bad data_pad");
+			ubi_err(ubi, "bad data_pad");
 			goto bad_av;
 		}
 
@@ -1553,48 +1562,48 @@
 			leb_count += 1;
 
 			if (aeb->pnum < 0 || aeb->ec < 0) {
-				ubi_err("negative values");
+				ubi_err(ubi, "negative values");
 				goto bad_aeb;
 			}
 
 			if (aeb->ec < ai->min_ec) {
-				ubi_err("bad ai->min_ec (%d), %d found",
+				ubi_err(ubi, "bad ai->min_ec (%d), %d found",
 					ai->min_ec, aeb->ec);
 				goto bad_aeb;
 			}
 
 			if (aeb->ec > ai->max_ec) {
-				ubi_err("bad ai->max_ec (%d), %d found",
+				ubi_err(ubi, "bad ai->max_ec (%d), %d found",
 					ai->max_ec, aeb->ec);
 				goto bad_aeb;
 			}
 
 			if (aeb->pnum >= ubi->peb_count) {
-				ubi_err("too high PEB number %d, total PEBs %d",
+				ubi_err(ubi, "too high PEB number %d, total PEBs %d",
 					aeb->pnum, ubi->peb_count);
 				goto bad_aeb;
 			}
 
 			if (av->vol_type == UBI_STATIC_VOLUME) {
 				if (aeb->lnum >= av->used_ebs) {
-					ubi_err("bad lnum or used_ebs");
+					ubi_err(ubi, "bad lnum or used_ebs");
 					goto bad_aeb;
 				}
 			} else {
 				if (av->used_ebs != 0) {
-					ubi_err("non-zero used_ebs");
+					ubi_err(ubi, "non-zero used_ebs");
 					goto bad_aeb;
 				}
 			}
 
 			if (aeb->lnum > av->highest_lnum) {
-				ubi_err("incorrect highest_lnum or lnum");
+				ubi_err(ubi, "incorrect highest_lnum or lnum");
 				goto bad_aeb;
 			}
 		}
 
 		if (av->leb_count != leb_count) {
-			ubi_err("bad leb_count, %d objects in the tree",
+			ubi_err(ubi, "bad leb_count, %d objects in the tree",
 				leb_count);
 			goto bad_av;
 		}
@@ -1605,13 +1614,13 @@
 		aeb = last_aeb;
 
 		if (aeb->lnum != av->highest_lnum) {
-			ubi_err("bad highest_lnum");
+			ubi_err(ubi, "bad highest_lnum");
 			goto bad_aeb;
 		}
 	}
 
 	if (vols_found != ai->vols_found) {
-		ubi_err("bad ai->vols_found %d, should be %d",
+		ubi_err(ubi, "bad ai->vols_found %d, should be %d",
 			ai->vols_found, vols_found);
 		goto out;
 	}
@@ -1628,7 +1637,8 @@
 
 			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
 			if (err && err != UBI_IO_BITFLIPS) {
-				ubi_err("VID header is not OK (%d)", err);
+				ubi_err(ubi, "VID header is not OK (%d)",
+					err);
 				if (err > 0)
 					err = -EIO;
 				return err;
@@ -1637,37 +1647,37 @@
 			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
 				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
 			if (av->vol_type != vol_type) {
-				ubi_err("bad vol_type");
+				ubi_err(ubi, "bad vol_type");
 				goto bad_vid_hdr;
 			}
 
 			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
-				ubi_err("bad sqnum %llu", aeb->sqnum);
+				ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
 				goto bad_vid_hdr;
 			}
 
 			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
-				ubi_err("bad vol_id %d", av->vol_id);
+				ubi_err(ubi, "bad vol_id %d", av->vol_id);
 				goto bad_vid_hdr;
 			}
 
 			if (av->compat != vidh->compat) {
-				ubi_err("bad compat %d", vidh->compat);
+				ubi_err(ubi, "bad compat %d", vidh->compat);
 				goto bad_vid_hdr;
 			}
 
 			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
-				ubi_err("bad lnum %d", aeb->lnum);
+				ubi_err(ubi, "bad lnum %d", aeb->lnum);
 				goto bad_vid_hdr;
 			}
 
 			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
-				ubi_err("bad used_ebs %d", av->used_ebs);
+				ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
 				goto bad_vid_hdr;
 			}
 
 			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
-				ubi_err("bad data_pad %d", av->data_pad);
+				ubi_err(ubi, "bad data_pad %d", av->data_pad);
 				goto bad_vid_hdr;
 			}
 		}
@@ -1676,12 +1686,13 @@
 			continue;
 
 		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
-			ubi_err("bad highest_lnum %d", av->highest_lnum);
+			ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
 			goto bad_vid_hdr;
 		}
 
 		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
-			ubi_err("bad last_data_size %d", av->last_data_size);
+			ubi_err(ubi, "bad last_data_size %d",
+				av->last_data_size);
 			goto bad_vid_hdr;
 		}
 	}
@@ -1722,7 +1733,7 @@
 	err = 0;
 	for (pnum = 0; pnum < ubi->peb_count; pnum++)
 		if (!buf[pnum]) {
-			ubi_err("PEB %d is not referred", pnum);
+			ubi_err(ubi, "PEB %d is not referred", pnum);
 			err = 1;
 		}
 
@@ -1732,18 +1743,18 @@
 	return 0;
 
 bad_aeb:
-	ubi_err("bad attaching information about LEB %d", aeb->lnum);
+	ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
 	ubi_dump_aeb(aeb, 0);
 	ubi_dump_av(av);
 	goto out;
 
 bad_av:
-	ubi_err("bad attaching information about volume %d", av->vol_id);
+	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
 	ubi_dump_av(av);
 	goto out;
 
 bad_vid_hdr:
-	ubi_err("bad attaching information about volume %d", av->vol_id);
+	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
 	ubi_dump_av(av);
 	ubi_dump_vid_hdr(vidh);
 
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 290d524..f0a3b67 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -80,6 +80,7 @@
 #ifdef CONFIG_MTD_UBI_FASTMAP
 /* UBI module parameter to enable fastmap automatically on non-fastmap images */
 static bool fm_autoconvert;
+static bool fm_debug;
 #endif
 #else
 #ifdef CONFIG_MTD_UBI_FASTMAP
@@ -87,10 +88,12 @@
 #define CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT 0
 #endif
 static bool fm_autoconvert = CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT;
+#if !defined(CONFIG_MTD_UBI_FM_DEBUG)
+#define CONFIG_MTD_UBI_FM_DEBUG 0
+#endif
+static bool fm_debug = CONFIG_MTD_UBI_FM_DEBUG;
 #endif
 #endif
-/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class *ubi_class;
 
 /* Slab cache for wear-leveling entries */
 struct kmem_cache *ubi_wl_entry_slab;
@@ -110,7 +113,7 @@
 #else
 struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
 #endif
-
+ 
 #ifndef __UBOOT__
 /* Serializes UBI devices creations and removals */
 DEFINE_MUTEX(ubi_devices_mutex);
@@ -126,8 +129,17 @@
 }
 
 /* UBI version attribute ('/<sysfs>/class/ubi/version') */
-static struct class_attribute ubi_version =
-	__ATTR(version, S_IRUGO, ubi_version_show, NULL);
+static struct class_attribute ubi_class_attrs[] = {
+	__ATTR(version, S_IRUGO, ubi_version_show, NULL),
+	__ATTR_NULL
+};
+
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class ubi_class = {
+	.name		= UBI_NAME_STR,
+	.owner		= THIS_MODULE,
+	.class_attrs	= ubi_class_attrs,
+};
 
 static ssize_t dev_attribute_show(struct device *dev,
 				  struct device_attribute *attr, char *buf);
@@ -169,23 +181,22 @@
  */
 int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
 {
+	int ret;
 	struct ubi_notification nt;
 
 	ubi_do_get_device_info(ubi, &nt.di);
 	ubi_do_get_volume_info(ubi, vol, &nt.vi);
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
 	switch (ntype) {
 	case UBI_VOLUME_ADDED:
 	case UBI_VOLUME_REMOVED:
 	case UBI_VOLUME_RESIZED:
 	case UBI_VOLUME_RENAMED:
-		if (ubi_update_fastmap(ubi)) {
-			ubi_err("Unable to update fastmap!");
-			ubi_ro_mode(ubi);
-		}
+		ret = ubi_update_fastmap(ubi);
+		if (ret)
+			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
 	}
-#endif
+
 	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
 }
 
@@ -406,6 +417,22 @@
 	return ret;
 }
 
+static struct attribute *ubi_dev_attrs[] = {
+	&dev_eraseblock_size.attr,
+	&dev_avail_eraseblocks.attr,
+	&dev_total_eraseblocks.attr,
+	&dev_volumes_count.attr,
+	&dev_max_ec.attr,
+	&dev_reserved_for_bad.attr,
+	&dev_bad_peb_count.attr,
+	&dev_max_vol_count.attr,
+	&dev_min_io_size.attr,
+	&dev_bgt_enabled.attr,
+	&dev_mtd_num.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(ubi_dev);
+
 static void dev_release(struct device *dev)
 {
 	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
@@ -428,45 +455,15 @@
 
 	ubi->dev.release = dev_release;
 	ubi->dev.devt = ubi->cdev.dev;
-	ubi->dev.class = ubi_class;
+	ubi->dev.class = &ubi_class;
+	ubi->dev.groups = ubi_dev_groups;
 	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
 	err = device_register(&ubi->dev);
 	if (err)
 		return err;
 
 	*ref = 1;
-	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_volumes_count);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_max_ec);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_max_vol_count);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_min_io_size);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
-	if (err)
-		return err;
-	err = device_create_file(&ubi->dev, &dev_mtd_num);
-	return err;
+	return 0;
 }
 
 /**
@@ -475,17 +472,6 @@
  */
 static void ubi_sysfs_close(struct ubi_device *ubi)
 {
-	device_remove_file(&ubi->dev, &dev_mtd_num);
-	device_remove_file(&ubi->dev, &dev_bgt_enabled);
-	device_remove_file(&ubi->dev, &dev_min_io_size);
-	device_remove_file(&ubi->dev, &dev_max_vol_count);
-	device_remove_file(&ubi->dev, &dev_bad_peb_count);
-	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
-	device_remove_file(&ubi->dev, &dev_max_ec);
-	device_remove_file(&ubi->dev, &dev_volumes_count);
-	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
-	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
-	device_remove_file(&ubi->dev, &dev_eraseblock_size);
 	device_unregister(&ubi->dev);
 }
 #endif
@@ -541,7 +527,7 @@
 	 */
 	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
 	if (err) {
-		ubi_err("cannot register UBI character devices");
+		ubi_err(ubi, "cannot register UBI character devices");
 		return err;
 	}
 
@@ -552,7 +538,7 @@
 
 	err = cdev_add(&ubi->cdev, dev, 1);
 	if (err) {
-		ubi_err("cannot add character device");
+		ubi_err(ubi, "cannot add character device");
 		goto out_unreg;
 	}
 
@@ -564,7 +550,7 @@
 		if (ubi->volumes[i]) {
 			err = ubi_add_volume(ubi, ubi->volumes[i]);
 			if (err) {
-				ubi_err("cannot add volume %d", i);
+				ubi_err(ubi, "cannot add volume %d", i);
 				goto out_volumes;
 			}
 		}
@@ -580,7 +566,8 @@
 	cdev_del(&ubi->cdev);
 out_unreg:
 	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
-	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
+	ubi_err(ubi, "cannot initialize UBI %s, error %d",
+		ubi->ubi_name, err);
 	return err;
 }
 
@@ -674,7 +661,7 @@
 		 * guess we should just pick the largest region. But this is
 		 * not implemented.
 		 */
-		ubi_err("multiple regions, not implemented");
+		ubi_err(ubi, "multiple regions, not implemented");
 		return -EINVAL;
 	}
 
@@ -709,7 +696,7 @@
 	 * which allows us to avoid costly division operations.
 	 */
 	if (!is_power_of_2(ubi->min_io_size)) {
-		ubi_err("min. I/O unit (%d) is not power of 2",
+		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
 			ubi->min_io_size);
 		return -EINVAL;
 	}
@@ -726,7 +713,7 @@
 	if (ubi->max_write_size < ubi->min_io_size ||
 	    ubi->max_write_size % ubi->min_io_size ||
 	    !is_power_of_2(ubi->max_write_size)) {
-		ubi_err("bad write buffer size %d for %d min. I/O unit",
+		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
 			ubi->max_write_size, ubi->min_io_size);
 		return -EINVAL;
 	}
@@ -763,7 +750,7 @@
 
 	/* The shift must be aligned to 32-bit boundary */
 	if (ubi->vid_hdr_shift % 4) {
-		ubi_err("unaligned VID header shift %d",
+		ubi_err(ubi, "unaligned VID header shift %d",
 			ubi->vid_hdr_shift);
 		return -EINVAL;
 	}
@@ -773,7 +760,7 @@
 	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
 	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
 	    ubi->leb_start & (ubi->min_io_size - 1)) {
-		ubi_err("bad VID header (%d) or data offsets (%d)",
+		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
 			ubi->vid_hdr_offset, ubi->leb_start);
 		return -EINVAL;
 	}
@@ -793,14 +780,14 @@
 	 * read-only mode.
 	 */
 	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
-		ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
+		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
 		ubi->ro_mode = 1;
 	}
 
 	ubi->leb_size = ubi->peb_size - ubi->leb_start;
 
 	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
-		ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
 			ubi->mtd->index);
 		ubi->ro_mode = 1;
 	}
@@ -833,7 +820,7 @@
 	int err, old_reserved_pebs = vol->reserved_pebs;
 
 	if (ubi->ro_mode) {
-		ubi_warn("skip auto-resize because of R/O mode");
+		ubi_warn(ubi, "skip auto-resize because of R/O mode");
 		return 0;
 	}
 
@@ -854,21 +841,22 @@
 		vtbl_rec = ubi->vtbl[vol_id];
 		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
 		if (err)
-			ubi_err("cannot clean auto-resize flag for volume %d",
+			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
 				vol_id);
 	} else {
 		desc.vol = vol;
 		err = ubi_resize_volume(&desc,
 					old_reserved_pebs + ubi->avail_pebs);
 		if (err)
-			ubi_err("cannot auto-resize volume %d", vol_id);
+			ubi_err(ubi, "cannot auto-resize volume %d",
+				vol_id);
 	}
 
 	if (err)
 		return err;
 
-	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
-		vol->name, old_reserved_pebs, vol->reserved_pebs);
+	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
+		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
 	return 0;
 }
 
@@ -909,7 +897,7 @@
 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
 		ubi = ubi_devices[i];
 		if (ubi && mtd->index == ubi->mtd->index) {
-			ubi_err("mtd%d is already attached to ubi%d",
+			ubi_err(ubi, "mtd%d is already attached to ubi%d",
 				mtd->index, i);
 			return -EEXIST;
 		}
@@ -924,7 +912,7 @@
 	 * no sense to attach emulated MTD devices, so we prohibit this.
 	 */
 	if (mtd->type == MTD_UBIVOLUME) {
-		ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+		ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
 			mtd->index);
 		return -EINVAL;
 	}
@@ -935,7 +923,7 @@
 			if (!ubi_devices[ubi_num])
 				break;
 		if (ubi_num == UBI_MAX_DEVICES) {
-			ubi_err("only %d UBI devices may be created",
+			ubi_err(ubi, "only %d UBI devices may be created",
 				UBI_MAX_DEVICES);
 			return -ENFILE;
 		}
@@ -945,7 +933,7 @@
 
 		/* Make sure ubi_num is not busy */
 		if (ubi_devices[ubi_num]) {
-			ubi_err("ubi%d already exists", ubi_num);
+			ubi_err(ubi, "already exists");
 			return -EEXIST;
 		}
 	}
@@ -969,21 +957,24 @@
 	 */
 	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
 		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
-	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
-		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
+		UBI_FM_MIN_POOL_SIZE);
 
-	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
 	ubi->fm_disabled = !fm_autoconvert;
+	if (fm_debug)
+		ubi_enable_dbg_chk_fastmap(ubi);
 
 	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
 	    <= UBI_FM_MAX_START) {
-		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
 			UBI_FM_MAX_START);
 		ubi->fm_disabled = 1;
 	}
 
-	ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
-	ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
+	ubi_msg(ubi, "default fastmap WL pool size: %d",
+		ubi->fm_wl_pool.max_size);
 #else
 	ubi->fm_disabled = 1;
 #endif
@@ -991,10 +982,10 @@
 	mutex_init(&ubi->ckvol_mutex);
 	mutex_init(&ubi->device_mutex);
 	spin_lock_init(&ubi->volumes_lock);
-	mutex_init(&ubi->fm_mutex);
-	init_rwsem(&ubi->fm_sem);
+	init_rwsem(&ubi->fm_protect);
+	init_rwsem(&ubi->fm_eba_sem);
 
-	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+	ubi_msg(ubi, "attaching mtd%d", mtd->index);
 
 	err = io_init(ubi, max_beb_per1024);
 	if (err)
@@ -1013,7 +1004,8 @@
 #endif
 	err = ubi_attach(ubi, 0);
 	if (err) {
-		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
+		ubi_err(ubi, "failed to attach mtd%d, error %d",
+			mtd->index, err);
 		goto out_free;
 	}
 
@@ -1034,28 +1026,28 @@
 	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
 	if (IS_ERR(ubi->bgt_thread)) {
 		err = PTR_ERR(ubi->bgt_thread);
-		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
-			err);
+		ubi_err(ubi, "cannot spawn \"%s\", error %d",
+			ubi->bgt_name, err);
 		goto out_debugfs;
 	}
 
-	ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
-		mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
-	ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
+		mtd->index, mtd->name, ubi->flash_size >> 20);
+	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
 		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
-	ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
 		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
-	ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
 		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
-	ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
 		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
-	ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+	ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
 		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
 		ubi->vtbl_slots);
-	ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
 		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
 		ubi->image_seq);
-	ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
 		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
 
 	/*
@@ -1064,7 +1056,20 @@
 	 */
 	spin_lock(&ubi->wl_lock);
 	ubi->thread_enabled = 1;
+#ifndef __UBOOT__
 	wake_up_process(ubi->bgt_thread);
+#else
+	/*
+	 * U-Boot special: We have no bgt_thread in U-Boot!
+	 * So just call do_work() here directly.
+	 */
+	err = do_work(ubi);
+	if (err) {
+		ubi_err(ubi, "%s: work failed with error code %d",
+			ubi->bgt_name, err);
+	}
+#endif
+
 	spin_unlock(&ubi->wl_lock);
 
 	ubi_devices[ubi_num] = ubi;
@@ -1124,7 +1129,7 @@
 			return -EBUSY;
 		}
 		/* This may only happen if there is a bug */
-		ubi_err("%s reference count %d, destroy anyway",
+		ubi_err(ubi, "%s reference count %d, destroy anyway",
 			ubi->ubi_name, ubi->ref_count);
 	}
 	ubi_devices[ubi_num] = NULL;
@@ -1132,11 +1137,14 @@
 
 	ubi_assert(ubi_num == ubi->ubi_num);
 	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
-	ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
 #ifdef CONFIG_MTD_UBI_FASTMAP
 	/* If we don't write a new fastmap at detach time we lose all
-	 * EC updates that have been made since the last written fastmap. */
-	ubi_update_fastmap(ubi);
+	 * EC updates that have been made since the last written fastmap.
+	 * In case of fastmap debugging we omit the update to simulate an
+	 * unclean shutdown. */
+	if (!ubi_dbg_chk_fastmap(ubi))
+		ubi_update_fastmap(ubi);
 #endif
 	/*
 	 * Before freeing anything, we have to stop the background thread to
@@ -1160,7 +1168,7 @@
 	put_mtd_device(ubi->mtd);
 	vfree(ubi->peb_buf);
 	vfree(ubi->fm_buf);
-	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
+	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
 	put_device(&ubi->dev);
 	return 0;
 }
@@ -1185,9 +1193,9 @@
 		return ERR_PTR(err);
 
 	/* MTD device number is defined by the major / minor numbers */
-	major = imajor(path.dentry->d_inode);
-	minor = iminor(path.dentry->d_inode);
-	mode = path.dentry->d_inode->i_mode;
+	major = imajor(d_backing_inode(path.dentry));
+	minor = iminor(d_backing_inode(path.dentry));
+	mode = d_backing_inode(path.dentry)->i_mode;
 	path_put(&path);
 	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
 		return ERR_PTR(-EINVAL);
@@ -1250,28 +1258,20 @@
 	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
 
 	if (mtd_devs > UBI_MAX_DEVICES) {
-		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
+		pr_err("UBI error: too many MTD devices, maximum is %d",
+		       UBI_MAX_DEVICES);
 		return -EINVAL;
 	}
 
 	/* Create base sysfs directory and sysfs files */
-	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
-	if (IS_ERR(ubi_class)) {
-		err = PTR_ERR(ubi_class);
-		ubi_err("cannot create UBI class");
-		goto out;
-	}
-
-	err = class_create_file(ubi_class, &ubi_version);
-	if (err) {
-		ubi_err("cannot create sysfs file");
-		goto out_class;
-	}
+	err = class_register(&ubi_class);
+	if (err < 0)
+		return err;
 
 	err = misc_register(&ubi_ctrl_cdev);
 	if (err) {
-		ubi_err("cannot register device");
-		goto out_version;
+		pr_err("UBI error: cannot register device");
+		goto out;
 	}
 
 	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
@@ -1297,7 +1297,8 @@
 		mtd = open_mtd_device(p->name);
 		if (IS_ERR(mtd)) {
 			err = PTR_ERR(mtd);
-			ubi_err("cannot open mtd %s, error %d", p->name, err);
+			pr_err("UBI error: cannot open mtd %s, error %d",
+			       p->name, err);
 			/* See comment below re-ubi_is_module(). */
 			if (ubi_is_module())
 				goto out_detach;
@@ -1309,7 +1310,8 @@
 					 p->vid_hdr_offs, p->max_beb_per1024);
 		mutex_unlock(&ubi_devices_mutex);
 		if (err < 0) {
-			ubi_err("cannot attach mtd%d", mtd->index);
+			pr_err("UBI error: cannot attach mtd%d",
+			       mtd->index);
 			put_mtd_device(mtd);
 
 			/*
@@ -1332,7 +1334,7 @@
 
 	err = ubiblock_init();
 	if (err) {
-		ubi_err("block: cannot initialize, error %d", err);
+		pr_err("UBI error: block: cannot initialize, error %d", err);
 
 		/* See comment above re-ubi_is_module(). */
 		if (ubi_is_module())
@@ -1353,16 +1355,13 @@
 	kmem_cache_destroy(ubi_wl_entry_slab);
 out_dev_unreg:
 	misc_deregister(&ubi_ctrl_cdev);
-out_version:
-	class_remove_file(ubi_class, &ubi_version);
-out_class:
-	class_destroy(ubi_class);
 out:
 #ifdef __UBOOT__
 	/* Reset any globals that the driver depends on being zeroed */
 	mtd_devs = 0;
 #endif
-	ubi_err("cannot initialize UBI, error %d", err);
+	class_unregister(&ubi_class);
+	pr_err("UBI error: cannot initialize UBI, error %d", err);
 	return err;
 }
 late_initcall(ubi_init);
@@ -1386,8 +1385,7 @@
 	ubi_debugfs_exit();
 	kmem_cache_destroy(ubi_wl_entry_slab);
 	misc_deregister(&ubi_ctrl_cdev);
-	class_remove_file(ubi_class, &ubi_version);
-	class_destroy(ubi_class);
+	class_unregister(&ubi_class);
 #ifdef __UBOOT__
 	/* Reset any globals that the driver depends on being zeroed */
 	mtd_devs = 0;
@@ -1409,7 +1407,7 @@
 
 	result = simple_strtoul(str, &endp, 0);
 	if (str == endp || result >= INT_MAX) {
-		ubi_err("incorrect bytes count: \"%s\"\n", str);
+		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
 		return -EINVAL;
 	}
 
@@ -1425,7 +1423,7 @@
 	case '\0':
 		break;
 	default:
-		ubi_err("incorrect bytes count: \"%s\"\n", str);
+		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
 		return -EINVAL;
 	}
 
@@ -1467,15 +1465,15 @@
 		return -EINVAL;
 
 	if (mtd_devs == UBI_MAX_DEVICES) {
-		ubi_err("too many parameters, max. is %d\n",
-			UBI_MAX_DEVICES);
+		pr_err("UBI error: too many parameters, max. is %d\n",
+		       UBI_MAX_DEVICES);
 		return -EINVAL;
 	}
 
 	len = strnlen(val, MTD_PARAM_LEN_MAX);
 	if (len == MTD_PARAM_LEN_MAX) {
-		ubi_err("parameter \"%s\" is too long, max. is %d\n",
-			val, MTD_PARAM_LEN_MAX);
+		pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+		       val, MTD_PARAM_LEN_MAX);
 		return -EINVAL;
 	}
 
@@ -1494,7 +1492,7 @@
 		tokens[i] = strsep(&pbuf, ",");
 
 	if (pbuf) {
-		ubi_err("too many arguments at \"%s\"\n", val);
+		pr_err("UBI error: too many arguments at \"%s\"\n", val);
 		return -EINVAL;
 	}
 
@@ -1514,8 +1512,8 @@
 		int err = kstrtoint(token, 10, &p->max_beb_per1024);
 
 		if (err) {
-			ubi_err("bad value for max_beb_per1024 parameter: %s",
-				token);
+			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+			       token);
 			return -EINVAL;
 		}
 	}
@@ -1525,7 +1523,8 @@
 		int err = kstrtoint(token, 10, &p->ubi_num);
 
 		if (err) {
-			ubi_err("bad value for ubi_num parameter: %s", token);
+			pr_err("UBI error: bad value for ubi_num parameter: %s",
+			       token);
 			return -EINVAL;
 		}
 	} else
@@ -1552,6 +1551,8 @@
 #ifdef CONFIG_MTD_UBI_FASTMAP
 module_param(fm_autoconvert, bool, 0644);
 MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+module_param(fm_debug, bool, 0);
+MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
 #endif
 MODULE_VERSION(__stringify(UBI_VERSION));
 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 6dcc4e4..c35c85b 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -33,12 +33,12 @@
 		return;
 	err = mtd_read(ubi->mtd, addr, len, &read, buf);
 	if (err && err != -EUCLEAN) {
-		ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+		ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
 			err, len, pnum, offset, read);
 		goto out;
 	}
 
-	ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+	ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
 		len, pnum, offset);
 	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
 out:
@@ -229,8 +229,8 @@
 	if (IS_ERR_OR_NULL(dfs_rootdir)) {
 		int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
 
-		ubi_err("cannot create \"ubi\" debugfs directory, error %d\n",
-			err);
+		pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
+		       err);
 		return err;
 	}
 
@@ -254,7 +254,7 @@
 	struct dentry *dent = file->f_path.dentry;
 	struct ubi_device *ubi;
 	struct ubi_debug_info *d;
-	char buf[3];
+	char buf[8];
 	int val;
 
 	ubi = ubi_get_device(ubi_num);
@@ -266,12 +266,30 @@
 		val = d->chk_gen;
 	else if (dent == d->dfs_chk_io)
 		val = d->chk_io;
+	else if (dent == d->dfs_chk_fastmap)
+		val = d->chk_fastmap;
 	else if (dent == d->dfs_disable_bgt)
 		val = d->disable_bgt;
 	else if (dent == d->dfs_emulate_bitflips)
 		val = d->emulate_bitflips;
 	else if (dent == d->dfs_emulate_io_failures)
 		val = d->emulate_io_failures;
+	else if (dent == d->dfs_emulate_power_cut) {
+		snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
+		count = simple_read_from_buffer(user_buf, count, ppos,
+						buf, strlen(buf));
+		goto out;
+	} else if (dent == d->dfs_power_cut_min) {
+		snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
+		count = simple_read_from_buffer(user_buf, count, ppos,
+						buf, strlen(buf));
+		goto out;
+	} else if (dent == d->dfs_power_cut_max) {
+		snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
+		count = simple_read_from_buffer(user_buf, count, ppos,
+						buf, strlen(buf));
+		goto out;
+	}
 	else {
 		count = -EINVAL;
 		goto out;
@@ -300,7 +318,7 @@
 	struct ubi_device *ubi;
 	struct ubi_debug_info *d;
 	size_t buf_size;
-	char buf[8];
+	char buf[8] = {0};
 	int val;
 
 	ubi = ubi_get_device(ubi_num);
@@ -314,6 +332,21 @@
 		goto out;
 	}
 
+	if (dent == d->dfs_power_cut_min) {
+		if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
+			count = -EINVAL;
+		goto out;
+	} else if (dent == d->dfs_power_cut_max) {
+		if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
+			count = -EINVAL;
+		goto out;
+	} else if (dent == d->dfs_emulate_power_cut) {
+		if (kstrtoint(buf, 0, &val) != 0)
+			count = -EINVAL;
+		d->emulate_power_cut = val;
+		goto out;
+	}
+
 	if (buf[0] == '1')
 		val = 1;
 	else if (buf[0] == '0')
@@ -327,6 +360,8 @@
 		d->chk_gen = val;
 	else if (dent == d->dfs_chk_io)
 		d->chk_io = val;
+	else if (dent == d->dfs_chk_fastmap)
+		d->chk_fastmap = val;
 	else if (dent == d->dfs_disable_bgt)
 		d->disable_bgt = val;
 	else if (dent == d->dfs_emulate_bitflips)
@@ -397,6 +432,13 @@
 		goto out_remove;
 	d->dfs_chk_io = dent;
 
+	fname = "chk_fastmap";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_chk_fastmap = dent;
+
 	fname = "tst_disable_bgt";
 	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
 				   &dfs_fops);
@@ -418,13 +460,34 @@
 		goto out_remove;
 	d->dfs_emulate_io_failures = dent;
 
+	fname = "tst_emulate_power_cut";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_emulate_power_cut = dent;
+
+	fname = "tst_emulate_power_cut_min";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_power_cut_min = dent;
+
+	fname = "tst_emulate_power_cut_max";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_power_cut_max = dent;
+
 	return 0;
 
 out_remove:
 	debugfs_remove_recursive(d->dfs_dir);
 out:
 	err = dent ? PTR_ERR(dent) : -ENODEV;
-	ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+	ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
 		fname, err);
 	return err;
 }
@@ -438,6 +501,39 @@
 	if (IS_ENABLED(CONFIG_DEBUG_FS))
 		debugfs_remove_recursive(ubi->dbg.dfs_dir);
 }
+
+/**
+ * ubi_dbg_power_cut - emulate a power cut if it is time to do so
+ * @ubi: UBI device description object
+ * @caller: Flags set to indicate from where the function is being called
+ *
+ * Returns non-zero if a power cut was emulated, zero if not.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+	unsigned int range;
+
+	if ((ubi->dbg.emulate_power_cut & caller) == 0)
+		return 0;
+
+	if (ubi->dbg.power_cut_counter == 0) {
+		ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
+
+		if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
+			range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
+			ubi->dbg.power_cut_counter += prandom_u32() % range;
+		}
+		return 0;
+	}
+
+	ubi->dbg.power_cut_counter--;
+	if (ubi->dbg.power_cut_counter)
+		return 0;
+
+	ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
+	ubi_ro_mode(ubi);
+	return 1;
+}
 #else
 int ubi_debugfs_init(void)
 {
@@ -456,4 +552,9 @@
 void ubi_debugfs_exit_dev(struct ubi_device *ubi)
 {
 }
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+	return 0;
+}
 #endif
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index bfa9dfb..d8d824e 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -117,4 +117,16 @@
 {
 	return ubi->dbg.chk_gen;
 }
+
+static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
+{
+	return ubi->dbg.chk_fastmap;
+}
+
+static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
+{
+	ubi->dbg.chk_fastmap = 1;
+}
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
 #endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index fce0ff8..6a78f67 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -333,9 +333,9 @@
 
 	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
 
-	down_read(&ubi->fm_sem);
+	down_read(&ubi->fm_eba_sem);
 	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 
 out_unlock:
@@ -415,11 +415,13 @@
 				 */
 				if (err == UBI_IO_BAD_HDR_EBADMSG ||
 				    err == UBI_IO_BAD_HDR) {
-					ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
 						 pnum, vol_id, lnum);
 					err = -EBADMSG;
-				} else
+				} else {
+					err = -EINVAL;
 					ubi_ro_mode(ubi);
+				}
 			}
 			goto out_free;
 		} else if (err == UBI_IO_BITFLIPS)
@@ -434,15 +436,14 @@
 
 	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 	if (err) {
-		if (err == UBI_IO_BITFLIPS) {
+		if (err == UBI_IO_BITFLIPS)
 			scrub = 1;
-			err = 0;
-		} else if (mtd_is_eccerr(err)) {
+		else if (mtd_is_eccerr(err)) {
 			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
 				goto out_unlock;
 			scrub = 1;
 			if (!check) {
-				ubi_msg("force data checking");
+				ubi_msg(ubi, "force data checking");
 				check = 1;
 				goto retry;
 			}
@@ -453,7 +454,7 @@
 	if (check) {
 		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
 		if (crc1 != crc) {
-			ubi_warn("CRC error: calculated %#08x, must be %#08x",
+			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
 				 crc1, crc);
 			err = -EBADMSG;
 			goto out_unlock;
@@ -473,6 +474,63 @@
 	return err;
 }
 
+#ifndef __UBOOT__
+/**
+ * ubi_eba_read_leb_sg - read data into a scatter gather list.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @sgl: UBI scatter gather list to store the read data
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * This function works exactly like ubi_eba_read_leb(). But instead of
+ * storing the read data into a buffer it writes to an UBI scatter gather
+ * list.
+ */
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+			struct ubi_sgl *sgl, int lnum, int offset, int len,
+			int check)
+{
+	int to_read;
+	int ret;
+	struct scatterlist *sg;
+
+	for (;;) {
+		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
+		sg = &sgl->sg[sgl->list_pos];
+		if (len < sg->length - sgl->page_pos)
+			to_read = len;
+		else
+			to_read = sg->length - sgl->page_pos;
+
+		ret = ubi_eba_read_leb(ubi, vol, lnum,
+				       sg_virt(sg) + sgl->page_pos, offset,
+				       to_read, check);
+		if (ret < 0)
+			return ret;
+
+		offset += to_read;
+		len -= to_read;
+		if (!len) {
+			sgl->page_pos += to_read;
+			if (sgl->page_pos == sg->length) {
+				sgl->list_pos++;
+				sgl->page_pos = 0;
+			}
+
+			break;
+		}
+
+		sgl->list_pos++;
+		sgl->page_pos = 0;
+	}
+
+	return ret;
+}
+#endif
+
 /**
  * recover_peb - recover from write failure.
  * @ubi: UBI device description object
@@ -504,22 +562,27 @@
 	new_pnum = ubi_wl_get_peb(ubi);
 	if (new_pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
+		up_read(&ubi->fm_eba_sem);
 		return new_pnum;
 	}
 
-	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
+	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
+		pnum, new_pnum);
 
 	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
 	if (err && err != UBI_IO_BITFLIPS) {
 		if (err > 0)
 			err = -EIO;
+		up_read(&ubi->fm_eba_sem);
 		goto out_put;
 	}
 
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
-	if (err)
+	if (err) {
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
+	}
 
 	data_size = offset + len;
 	mutex_lock(&ubi->buf_mutex);
@@ -528,8 +591,10 @@
 	/* Read everything before the area where the write failure happened */
 	if (offset > 0) {
 		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
-		if (err && err != UBI_IO_BITFLIPS)
+		if (err && err != UBI_IO_BITFLIPS) {
+			up_read(&ubi->fm_eba_sem);
 			goto out_unlock;
+		}
 	}
 
 	memcpy(ubi->peb_buf + offset, buf, len);
@@ -537,18 +602,18 @@
 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
 	if (err) {
 		mutex_unlock(&ubi->buf_mutex);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
 	mutex_unlock(&ubi->buf_mutex);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 
-	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = new_pnum;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 
-	ubi_msg("data was successfully recovered");
+	ubi_msg(ubi, "data was successfully recovered");
 	return 0;
 
 out_unlock:
@@ -563,13 +628,13 @@
 	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
 	 * get another one.
 	 */
-	ubi_warn("failed to write to PEB %d", new_pnum);
+	ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
 	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
 	if (++tries > UBI_IO_RETRIES) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		return err;
 	}
-	ubi_msg("try again");
+	ubi_msg(ubi, "try again");
 	goto retry;
 }
 
@@ -607,7 +672,7 @@
 
 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
 		if (err) {
-			ubi_warn("failed to write data to PEB %d", pnum);
+			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
 			if (err == -EIO && ubi->bad_allowed)
 				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
 						  offset, len);
@@ -640,6 +705,7 @@
 	if (pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		leb_write_unlock(ubi, vol_id, lnum);
+		up_read(&ubi->fm_eba_sem);
 		return pnum;
 	}
 
@@ -648,23 +714,24 @@
 
 	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
 	if (err) {
-		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
 			 vol_id, lnum, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
 	if (len) {
 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
 		if (err) {
-			ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+			ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
 				 len, offset, vol_id, lnum, pnum);
+			up_read(&ubi->fm_eba_sem);
 			goto write_error;
 		}
 	}
 
-	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -692,7 +759,7 @@
 	}
 
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ubi_msg("try another PEB");
+	ubi_msg(ubi, "try another PEB");
 	goto retry;
 }
 
@@ -761,6 +828,7 @@
 	if (pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		leb_write_unlock(ubi, vol_id, lnum);
+		up_read(&ubi->fm_eba_sem);
 		return pnum;
 	}
 
@@ -769,22 +837,23 @@
 
 	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
 	if (err) {
-		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
 			 vol_id, lnum, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
 	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
 	if (err) {
-		ubi_warn("failed to write %d bytes of data to PEB %d",
+		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
 			 len, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] < 0);
-	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -812,7 +881,7 @@
 	}
 
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ubi_msg("try another PEB");
+	ubi_msg(ubi, "try another PEB");
 	goto retry;
 }
 
@@ -836,7 +905,7 @@
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 			      int lnum, const void *buf, int len)
 {
-	int err, pnum, tries = 0, vol_id = vol->vol_id;
+	int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
@@ -879,6 +948,7 @@
 	pnum = ubi_wl_get_peb(ubi);
 	if (pnum < 0) {
 		err = pnum;
+		up_read(&ubi->fm_eba_sem);
 		goto out_leb_unlock;
 	}
 
@@ -887,28 +957,30 @@
 
 	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
 	if (err) {
-		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
 			 vol_id, lnum, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
 	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
 	if (err) {
-		ubi_warn("failed to write %d bytes of data to PEB %d",
+		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
 			 len, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
-	if (vol->eba_tbl[lnum] >= 0) {
-		err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
+	old_pnum = vol->eba_tbl[lnum];
+	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_eba_sem);
+
+	if (old_pnum >= 0) {
+		err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
 		if (err)
 			goto out_leb_unlock;
 	}
 
-	down_read(&ubi->fm_sem);
-	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_sem);
-
 out_leb_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
 out_mutex:
@@ -934,7 +1006,7 @@
 	}
 
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ubi_msg("try another PEB");
+	ubi_msg(ubi, "try another PEB");
 	goto retry;
 }
 
@@ -1057,7 +1129,7 @@
 	dbg_wl("read %d bytes of data", aldata_size);
 	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
 	if (err && err != UBI_IO_BITFLIPS) {
-		ubi_warn("error %d while reading data from PEB %d",
+		ubi_warn(ubi, "error %d while reading data from PEB %d",
 			 err, from);
 		err = MOVE_SOURCE_RD_ERR;
 		goto out_unlock_buf;
@@ -1107,7 +1179,7 @@
 	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
 	if (err) {
 		if (err != UBI_IO_BITFLIPS) {
-			ubi_warn("error %d while reading VID header back from PEB %d",
+			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
 				 err, to);
 			if (is_error_sane(err))
 				err = MOVE_TARGET_RD_ERR;
@@ -1134,7 +1206,7 @@
 		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
 		if (err) {
 			if (err != UBI_IO_BITFLIPS) {
-				ubi_warn("error %d while reading data back from PEB %d",
+				ubi_warn(ubi, "error %d while reading data back from PEB %d",
 					 err, to);
 				if (is_error_sane(err))
 					err = MOVE_TARGET_RD_ERR;
@@ -1146,7 +1218,7 @@
 		cond_resched();
 
 		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
-			ubi_warn("read data back from PEB %d and it is different",
+			ubi_warn(ubi, "read data back from PEB %d and it is different",
 				 to);
 			err = -EINVAL;
 			goto out_unlock_buf;
@@ -1154,9 +1226,9 @@
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] == from);
-	down_read(&ubi->fm_sem);
+	down_read(&ubi->fm_eba_sem);
 	vol->eba_tbl[lnum] = to;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 
 out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
@@ -1199,10 +1271,10 @@
 			return;
 	}
 
-	ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
 		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
 	if (ubi->corr_peb_count)
-		ubi_warn("%d PEBs are corrupted and not used",
+		ubi_warn(ubi, "%d PEBs are corrupted and not used",
 			 ubi->corr_peb_count);
 }
 
@@ -1280,7 +1352,7 @@
 					fm_eba[i][j] == UBI_LEB_UNMAPPED)
 					continue;
 
-				ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
 					vol->vol_id, i, fm_eba[i][j],
 					scan_eba[i][j]);
 				ubi_assert(0);
@@ -1355,15 +1427,16 @@
 				 * during re-size.
 				 */
 				ubi_move_aeb_to_list(av, aeb, &ai->erase);
-			vol->eba_tbl[aeb->lnum] = aeb->pnum;
+			else
+				vol->eba_tbl[aeb->lnum] = aeb->pnum;
 		}
 	}
 
 	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
-		ubi_err("no enough physical eraseblocks (%d, need %d)",
+		ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
 			ubi->avail_pebs, EBA_RESERVED_PEBS);
 		if (ubi->corr_peb_count)
-			ubi_err("%d PEBs are corrupted and not used",
+			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
 		err = -ENOSPC;
 		goto out_free;
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
new file mode 100644
index 0000000..a33d406
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ *
+ */
+
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+#ifndef __UBOOT__
+static void update_fastmap_work_fn(struct work_struct *wrk)
+#else
+void update_fastmap_work_fn(struct ubi_device *ubi)
+#endif
+{
+#ifndef __UBOOT__
+	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+#endif
+
+	ubi_update_fastmap(ubi);
+	spin_lock(&ubi->wl_lock);
+	ubi->fm_work_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e, *victim = NULL;
+	int max_ec = UBI_MAX_ERASECOUNTER;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb) {
+		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+			victim = e;
+			max_ec = e->ec;
+		}
+	}
+
+	return victim;
+}
+
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+				    struct ubi_fm_pool *pool)
+{
+	int i;
+	struct ubi_wl_entry *e;
+
+	for (i = pool->used; i < pool->size; i++) {
+		e = ubi->lookuptbl[pool->pebs[i]];
+		wl_tree_add(e, &ubi->free);
+		ubi->free_count++;
+	}
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb)
+		if (e->pnum < UBI_FM_MAX_START)
+			return 1;
+
+	return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+	struct ubi_wl_entry *e = NULL;
+
+	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+		goto out;
+
+	if (anchor)
+		e = find_anchor_wl_entry(&ubi->free);
+	else
+		e = find_mean_wl_entry(ubi, &ubi->free);
+
+	if (!e)
+		goto out;
+
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/* remove it from the free list,
+	 * the wl subsystem no longer knows this erase block */
+	rb_erase(&e->u.rb, &ubi->free);
+	ubi->free_count--;
+out:
+	return e;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+	struct ubi_wl_entry *e;
+	int enough;
+
+	spin_lock(&ubi->wl_lock);
+
+	return_unused_pool_pebs(ubi, wl_pool);
+	return_unused_pool_pebs(ubi, pool);
+
+	wl_pool->size = 0;
+	pool->size = 0;
+
+	for (;;) {
+		enough = 0;
+		if (pool->size < pool->max_size) {
+			if (!ubi->free.rb_node)
+				break;
+
+			e = wl_get_wle(ubi);
+			if (!e)
+				break;
+
+			pool->pebs[pool->size] = e->pnum;
+			pool->size++;
+		} else
+			enough++;
+
+		if (wl_pool->size < wl_pool->max_size) {
+			if (!ubi->free.rb_node ||
+			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+				break;
+
+			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+			self_check_in_wl_tree(ubi, e, &ubi->free);
+			rb_erase(&e->u.rb, &ubi->free);
+			ubi->free_count--;
+
+			wl_pool->pebs[wl_pool->size] = e->pnum;
+			wl_pool->size++;
+		} else
+			enough++;
+
+		if (enough == 2)
+			break;
+	}
+
+	wl_pool->used = 0;
+	pool->used = 0;
+
+	spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	int ret, retried = 0;
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+again:
+	down_read(&ubi->fm_eba_sem);
+	spin_lock(&ubi->wl_lock);
+
+	/* We check here also for the WL pool because at this point we can
+	 * refill the WL pool synchronously. */
+	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->fm_eba_sem);
+		ret = ubi_update_fastmap(ubi);
+		if (ret) {
+			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+			down_read(&ubi->fm_eba_sem);
+			return -ENOSPC;
+		}
+		down_read(&ubi->fm_eba_sem);
+		spin_lock(&ubi->wl_lock);
+	}
+
+	if (pool->used == pool->size) {
+		spin_unlock(&ubi->wl_lock);
+		if (retried) {
+			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+			ret = -ENOSPC;
+			goto out;
+		}
+		retried = 1;
+		up_read(&ubi->fm_eba_sem);
+		goto again;
+	}
+
+	ubi_assert(pool->used < pool->size);
+	ret = pool->pebs[pool->used++];
+	prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	spin_unlock(&ubi->wl_lock);
+out:
+	return ret;
+}
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+	int pnum;
+
+	if (pool->used == pool->size) {
+#ifndef __UBOOT__
+		/* We cannot update the fastmap here because this
+		 * function is called in atomic context.
+		 * Let's fail here and refill/update it as soon as possible. */
+		if (!ubi->fm_work_scheduled) {
+			ubi->fm_work_scheduled = 1;
+			schedule_work(&ubi->fm_work);
+		}
+		return NULL;
+#else
+		/*
+		 * No work queues in U-Boot, we must do this immediately
+		 */
+		update_fastmap_work_fn(ubi);
+#endif
+	}
+
+	pnum = pool->pebs[pool->used++];
+	return ubi->lookuptbl[pnum];
+}
+
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+	struct ubi_work *wrk;
+
+	spin_lock(&ubi->wl_lock);
+	if (ubi->wl_scheduled) {
+		spin_unlock(&ubi->wl_lock);
+		return 0;
+	}
+	ubi->wl_scheduled = 1;
+	spin_unlock(&ubi->wl_lock);
+
+	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+	if (!wrk) {
+		spin_lock(&ubi->wl_lock);
+		ubi->wl_scheduled = 0;
+		spin_unlock(&ubi->wl_lock);
+		return -ENOMEM;
+	}
+
+	wrk->anchor = 1;
+	wrk->func = &wear_leveling_worker;
+	schedule_ubi_work(ubi, wrk);
+	return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+		      int lnum, int torture)
+{
+	struct ubi_wl_entry *e;
+	int vol_id, pnum = fm_e->pnum;
+
+	dbg_wl("PEB %d", pnum);
+
+	ubi_assert(pnum >= 0);
+	ubi_assert(pnum < ubi->peb_count);
+
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+
+	/* This can happen if we recovered from a fastmap the very
+	 * first time and are now writing a new one. In this case the wl system
+	 * has never seen any PEB used by the original fastmap.
+	 */
+	if (!e) {
+		e = fm_e;
+		ubi_assert(e->ec >= 0);
+		ubi->lookuptbl[pnum] = e;
+	}
+
+	spin_unlock(&ubi->wl_lock);
+
+	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+	return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+	return wrk->func == erase_worker;
+}
+
+static void ubi_fastmap_close(struct ubi_device *ubi)
+{
+	int i;
+
+#ifndef __UBOOT__
+	flush_work(&ubi->fm_work);
+#else
+	update_fastmap_work_fn(ubi);
+#endif
+	return_unused_pool_pebs(ubi, &ubi->fm_pool);
+	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
+	if (ubi->fm) {
+		for (i = 0; i < ubi->fm->used_blocks; i++)
+			kfree(ubi->fm->e[i]);
+	}
+	kfree(ubi->fm);
+}
+
+/**
+ * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
+ * See find_mean_wl_entry()
+ *
+ * @ubi: UBI device description object
+ * @e: physical eraseblock to return
+ * @root: RB tree to test against.
+ */
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+					   struct ubi_wl_entry *e,
+					   struct rb_root *root) {
+	if (e && !ubi->fm_disabled && !ubi->fm &&
+	    e->pnum < UBI_FM_MAX_START)
+		e = rb_entry(rb_next(root->rb_node),
+			     struct ubi_wl_entry, u.rb);
+
+	return e;
+}
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index a2166e4..48ba98a 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
  * Author: Richard Weinberger <richard@nod.at>
  *
  * SPDX-License-Identifier:	GPL-2.0+
@@ -19,6 +20,69 @@
 #include "ubi.h"
 
 /**
+ * init_seen - allocate memory used for debugging.
+ * @ubi: UBI device description object
+ */
+static inline int *init_seen(struct ubi_device *ubi)
+{
+	int *ret;
+
+	if (!ubi_dbg_chk_fastmap(ubi))
+		return NULL;
+
+	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
+	if (!ret)
+		return ERR_PTR(-ENOMEM);
+
+	return ret;
+}
+
+/**
+ * free_seen - free the seen logic integer array.
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void free_seen(int *seen)
+{
+	kfree(seen);
+}
+
+/**
+ * set_seen - mark a PEB as seen.
+ * @ubi: UBI device description object
+ * @pnum: The PEB to be marked as seen
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
+{
+	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+		return;
+
+	seen[pnum] = 1;
+}
+
+/**
+ * self_check_seen - check whether all PEBs have been seen by fastmap.
+ * @ubi: UBI device description object
+ * @seen: integer array of @ubi->peb_count size
+ */
+static int self_check_seen(struct ubi_device *ubi, int *seen)
+{
+	int pnum, ret = 0;
+
+	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+		return 0;
+
+	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
+			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+/**
  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
  * @ubi: UBI device description object
  */
@@ -26,12 +90,13 @@
 {
 	size_t size;
 
-	size = sizeof(struct ubi_fm_hdr) + \
-		sizeof(struct ubi_fm_scan_pool) + \
-		sizeof(struct ubi_fm_scan_pool) + \
-		(ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
-		(sizeof(struct ubi_fm_eba) + \
-		(ubi->peb_count * sizeof(__be32))) + \
+	size = sizeof(struct ubi_fm_sb) +
+		sizeof(struct ubi_fm_hdr) +
+		sizeof(struct ubi_fm_scan_pool) +
+		sizeof(struct ubi_fm_scan_pool) +
+		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+		(sizeof(struct ubi_fm_eba) +
+		(ubi->peb_count * sizeof(__be32))) +
 		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
 	return roundup(size, ubi->leb_size);
 }
@@ -129,22 +194,25 @@
 
 		if (vol_id > av->vol_id)
 			p = &(*p)->rb_left;
-		else if (vol_id > av->vol_id)
+		else if (vol_id < av->vol_id)
 			p = &(*p)->rb_right;
+		else
+			return ERR_PTR(-EINVAL);
 	}
 
 	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
 	if (!av)
 		goto out;
 
-	av->highest_lnum = av->leb_count = 0;
+	av->highest_lnum = av->leb_count = av->used_ebs = 0;
 	av->vol_id = vol_id;
-	av->used_ebs = used_ebs;
 	av->data_pad = data_pad;
 	av->last_data_size = last_eb_bytes;
 	av->compat = 0;
 	av->vol_type = vol_type;
 	av->root = RB_ROOT;
+	if (av->vol_type == UBI_STATIC_VOLUME)
+		av->used_ebs = used_ebs;
 
 	dbg_bld("found volume (ID %i)", vol_id);
 
@@ -250,7 +318,7 @@
 			list_add_tail(&victim->u.list, &ai->erase);
 
 			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
-				av->last_data_size = \
+				av->last_data_size =
 					be32_to_cpu(new_vh->data_size);
 
 			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
@@ -331,7 +399,8 @@
 	if (found)
 		av = tmp_av;
 	else {
-		ubi_err("orphaned volume in fastmap pool!");
+		ubi_err(ubi, "orphaned volume in fastmap pool!");
+		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 		return UBI_BAD_FASTMAP;
 	}
 
@@ -362,6 +431,7 @@
 			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
 			if (aeb->pnum == pnum) {
 				rb_erase(&aeb->u.rb, &av->root);
+				av->leb_count--;
 				kmem_cache_free(ai->aeb_slab_cache, aeb);
 				return;
 			}
@@ -376,7 +446,6 @@
  * @pebs: an array of all PEB numbers in the to be scanned pool
  * @pool_size: size of the pool (number of entries in @pebs)
  * @max_sqnum: pointer to the maximal sequence number
- * @eba_orphans: list of PEBs which need to be scanned
  * @free: list of PEBs which are most likely free (and go into @ai->free)
  *
  * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
@@ -385,17 +454,17 @@
 #ifndef __UBOOT__
 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		     int *pebs, int pool_size, unsigned long long *max_sqnum,
-		     struct list_head *eba_orphans, struct list_head *freef)
+		     struct list_head *free)
 #else
 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
-		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
-		     struct list_head *eba_orphans, struct list_head *freef)
+		     int *pebs, int pool_size, unsigned long long *max_sqnum,
+		     struct list_head *free)
 #endif
 {
 	struct ubi_vid_hdr *vh;
 	struct ubi_ec_hdr *ech;
-	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
-	int i, pnum, err, found_orphan, ret = 0;
+	struct ubi_ainf_peb *new_aeb;
+	int i, pnum, err, ret = 0;
 
 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 	if (!ech)
@@ -420,18 +489,18 @@
 		pnum = be32_to_cpu(pebs[i]);
 
 		if (ubi_io_is_bad(ubi, pnum)) {
-			ubi_err("bad PEB in fastmap pool!");
+			ubi_err(ubi, "bad PEB in fastmap pool!");
 			ret = UBI_BAD_FASTMAP;
 			goto out;
 		}
 
 		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
 		if (err && err != UBI_IO_BITFLIPS) {
-			ubi_err("unable to read EC header! PEB:%i err:%i",
+			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
 				pnum, err);
 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
 			goto out;
-		} else if (ret == UBI_IO_BITFLIPS)
+		} else if (err == UBI_IO_BITFLIPS)
 			scrub = 1;
 
 		/*
@@ -441,7 +510,7 @@
 		image_seq = be32_to_cpu(ech->image_seq);
 
 		if (image_seq && (image_seq != ubi->image_seq)) {
-			ubi_err("bad image seq: 0x%x, expected: 0x%x",
+			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
 				be32_to_cpu(ech->image_seq), ubi->image_seq);
 			ret = UBI_BAD_FASTMAP;
 			goto out;
@@ -453,9 +522,9 @@
 			unmap_peb(ai, pnum);
 			dbg_bld("Adding PEB to free: %i", pnum);
 			if (err == UBI_IO_FF_BITFLIPS)
-				add_aeb(ai, freef, pnum, ec, 1);
+				add_aeb(ai, free, pnum, ec, 1);
 			else
-				add_aeb(ai, freef, pnum, ec, 0);
+				add_aeb(ai, free, pnum, ec, 0);
 			continue;
 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
 			dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -463,18 +532,6 @@
 			if (err == UBI_IO_BITFLIPS)
 				scrub = 1;
 
-			found_orphan = 0;
-			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
-				if (tmp_aeb->pnum == pnum) {
-					found_orphan = 1;
-					break;
-				}
-			}
-			if (found_orphan) {
-				list_del(&tmp_aeb->u.list);
-				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
-			}
-
 			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
 						   GFP_KERNEL);
 			if (!new_aeb) {
@@ -499,7 +556,7 @@
 			}
 		} else {
 			/* We are paranoid and fall back to scanning mode */
-			ubi_err("fastmap pool PEBs contains damaged PEBs!");
+			ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
 			goto out;
 		}
@@ -549,13 +606,12 @@
 			      struct ubi_attach_info *ai,
 			      struct ubi_fastmap_layout *fm)
 {
-	struct list_head used, eba_orphans, freef;
+	struct list_head used, free;
 	struct ubi_ainf_volume *av;
 	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
-	struct ubi_ec_hdr *ech;
 	struct ubi_fm_sb *fmsb;
 	struct ubi_fm_hdr *fmhdr;
-	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
 	struct ubi_fm_ec *fmec;
 	struct ubi_fm_volhdr *fmvhdr;
 	struct ubi_fm_eba *fm_eba;
@@ -565,23 +621,9 @@
 	void *fm_raw = ubi->fm_buf;
 
 	INIT_LIST_HEAD(&used);
-	INIT_LIST_HEAD(&freef);
-	INIT_LIST_HEAD(&eba_orphans);
-	INIT_LIST_HEAD(&ai->corr);
-	INIT_LIST_HEAD(&ai->free);
-	INIT_LIST_HEAD(&ai->erase);
-	INIT_LIST_HEAD(&ai->alien);
-	ai->volumes = RB_ROOT;
+	INIT_LIST_HEAD(&free);
 	ai->min_ec = UBI_MAX_ERASECOUNTER;
 
-	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
-					       sizeof(struct ubi_ainf_peb),
-					       0, 0, NULL);
-	if (!ai->aeb_slab_cache) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
 	fmsb = (struct ubi_fm_sb *)(fm_raw);
 	ai->max_sqnum = fmsb->sqnum;
 	fm_pos += sizeof(struct ubi_fm_sb);
@@ -594,56 +636,57 @@
 		goto fail_bad;
 
 	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
-		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
 			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
 		goto fail_bad;
 	}
 
-	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
-	fm_pos += sizeof(*fmpl1);
+	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl);
 	if (fm_pos >= fm_size)
 		goto fail_bad;
-	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
-		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
-			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
+		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
+			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
 		goto fail_bad;
 	}
 
-	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
-	fm_pos += sizeof(*fmpl2);
+	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl_wl);
 	if (fm_pos >= fm_size)
 		goto fail_bad;
-	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
-		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
-			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
+		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
+			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
 		goto fail_bad;
 	}
 
-	pool_size = be16_to_cpu(fmpl1->size);
-	wl_pool_size = be16_to_cpu(fmpl2->size);
-	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
-	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+	pool_size = be16_to_cpu(fmpl->size);
+	wl_pool_size = be16_to_cpu(fmpl_wl->size);
+	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
+	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
 
 	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
-		ubi_err("bad pool size: %i", pool_size);
+		ubi_err(ubi, "bad pool size: %i", pool_size);
 		goto fail_bad;
 	}
 
 	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
-		ubi_err("bad WL pool size: %i", wl_pool_size);
+		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
 		goto fail_bad;
 	}
 
 
 	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
 	    fm->max_pool_size < 0) {
-		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
 		goto fail_bad;
 	}
 
 	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
 	    fm->max_wl_pool_size < 0) {
-		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+		ubi_err(ubi, "bad maximal WL pool size: %i",
+			fm->max_wl_pool_size);
 		goto fail_bad;
 	}
 
@@ -702,8 +745,7 @@
 			goto fail_bad;
 
 		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
-			ubi_err("bad fastmap vol header magic: 0x%x, " \
-				"expected: 0x%x",
+			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
 				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
 			goto fail_bad;
 		}
@@ -716,6 +758,11 @@
 
 		if (!av)
 			goto fail_bad;
+		if (PTR_ERR(av) == -EINVAL) {
+			ubi_err(ubi, "volume (ID %i) already exists",
+				fmvhdr->vol_id);
+			goto fail_bad;
+		}
 
 		ai->vols_found++;
 		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
@@ -728,8 +775,7 @@
 			goto fail_bad;
 
 		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
-			ubi_err("bad fastmap EBA header magic: 0x%x, " \
-				"expected: 0x%x",
+			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
 				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
 			goto fail_bad;
 		}
@@ -748,28 +794,9 @@
 				}
 			}
 
-			/* This can happen if a PEB is already in an EBA known
-			 * by this fastmap but the PEB itself is not in the used
-			 * list.
-			 * In this case the PEB can be within the fastmap pool
-			 * or while writing the fastmap it was in the protection
-			 * queue.
-			 */
 			if (!aeb) {
-				aeb = kmem_cache_alloc(ai->aeb_slab_cache,
-						       GFP_KERNEL);
-				if (!aeb) {
-					ret = -ENOMEM;
-
-					goto fail;
-				}
-
-				aeb->lnum = j;
-				aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
-				aeb->ec = -1;
-				aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
-				list_add_tail(&aeb->u.list, &eba_orphans);
-				continue;
+				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
+				goto fail_bad;
 			}
 
 			aeb->lnum = j;
@@ -782,61 +809,26 @@
 			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
 				aeb->pnum, aeb->lnum, av->vol_id);
 		}
-
-		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
-		if (!ech) {
-			ret = -ENOMEM;
-			goto fail;
-		}
-
-		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
-					 u.list) {
-			int err;
-
-			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
-				ubi_err("bad PEB in fastmap EBA orphan list");
-				ret = UBI_BAD_FASTMAP;
-				kfree(ech);
-				goto fail;
-			}
-
-			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
-			if (err && err != UBI_IO_BITFLIPS) {
-				ubi_err("unable to read EC header! PEB:%i " \
-					"err:%i", tmp_aeb->pnum, err);
-				ret = err > 0 ? UBI_BAD_FASTMAP : err;
-				kfree(ech);
-
-				goto fail;
-			} else if (err == UBI_IO_BITFLIPS)
-				tmp_aeb->scrub = 1;
-
-			tmp_aeb->ec = be64_to_cpu(ech->ec);
-			assign_aeb_to_av(ai, tmp_aeb, av);
-		}
-
-		kfree(ech);
 	}
 
-	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
-			&eba_orphans, &freef);
+	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
 	if (ret)
 		goto fail;
 
-	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
-			&eba_orphans, &freef);
+	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
 	if (ret)
 		goto fail;
 
 	if (max_sqnum > ai->max_sqnum)
 		ai->max_sqnum = max_sqnum;
 
-	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list)
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
 		list_move_tail(&tmp_aeb->u.list, &ai->free);
 
-	ubi_assert(list_empty(&used));
-	ubi_assert(list_empty(&eba_orphans));
-	ubi_assert(list_empty(&freef));
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
+		list_move_tail(&tmp_aeb->u.list, &ai->erase);
+
+	ubi_assert(list_empty(&free));
 
 	/*
 	 * If fastmap is leaking PEBs (must not happen), raise a
@@ -865,11 +857,7 @@
 		list_del(&tmp_aeb->u.list);
 		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 	}
-	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
-		list_del(&tmp_aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
-	}
-	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list) {
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
 		list_del(&tmp_aeb->u.list);
 		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 	}
@@ -899,7 +887,7 @@
 	__be32 crc, tmp_crc;
 	unsigned long long sqnum = 0;
 
-	mutex_lock(&ubi->fm_mutex);
+	down_write(&ubi->fm_protect);
 	memset(ubi->fm_buf, 0, ubi->fm_size);
 
 	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
@@ -922,14 +910,14 @@
 		fm->to_be_tortured[0] = 1;
 
 	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
-		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
 			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
 		ret = UBI_BAD_FASTMAP;
 		goto free_fm_sb;
 	}
 
 	if (fmsb->version != UBI_FM_FMT_VERSION) {
-		ubi_err("bad fastmap version: %i, expected: %i",
+		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
 			fmsb->version, UBI_FM_FMT_VERSION);
 		ret = UBI_BAD_FASTMAP;
 		goto free_fm_sb;
@@ -937,15 +925,16 @@
 
 	used_blocks = be32_to_cpu(fmsb->used_blocks);
 	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
-		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
+			used_blocks);
 		ret = UBI_BAD_FASTMAP;
 		goto free_fm_sb;
 	}
 
 	fm_size = ubi->leb_size * used_blocks;
 	if (fm_size != ubi->fm_size) {
-		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
-			ubi->fm_size);
+		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
+			fm_size, ubi->fm_size);
 		ret = UBI_BAD_FASTMAP;
 		goto free_fm_sb;
 	}
@@ -974,7 +963,7 @@
 
 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
 		if (ret && ret != UBI_IO_BITFLIPS) {
-			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
 				i, pnum);
 			if (ret > 0)
 				ret = UBI_BAD_FASTMAP;
@@ -991,7 +980,7 @@
 		 * we shouldn't fail if image_seq == 0.
 		 */
 		if (image_seq && (image_seq != ubi->image_seq)) {
-			ubi_err("wrong image seq:%d instead of %d",
+			ubi_err(ubi, "wrong image seq:%d instead of %d",
 				be32_to_cpu(ech->image_seq), ubi->image_seq);
 			ret = UBI_BAD_FASTMAP;
 			goto free_hdr;
@@ -999,15 +988,14 @@
 
 		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
 		if (ret && ret != UBI_IO_BITFLIPS) {
-			ubi_err("unable to read fastmap block# %i (PEB: %i)",
+			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
 				i, pnum);
 			goto free_hdr;
 		}
 
 		if (i == 0) {
 			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
-				ubi_err("bad fastmap anchor vol_id: 0x%x," \
-					" expected: 0x%x",
+				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
 					be32_to_cpu(vh->vol_id),
 					UBI_FM_SB_VOLUME_ID);
 				ret = UBI_BAD_FASTMAP;
@@ -1015,8 +1003,7 @@
 			}
 		} else {
 			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
-				ubi_err("bad fastmap data vol_id: 0x%x," \
-					" expected: 0x%x",
+				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
 					be32_to_cpu(vh->vol_id),
 					UBI_FM_DATA_VOLUME_ID);
 				ret = UBI_BAD_FASTMAP;
@@ -1030,7 +1017,7 @@
 		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
 				  ubi->leb_start, ubi->leb_size);
 		if (ret && ret != UBI_IO_BITFLIPS) {
-			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
 				"err: %i)", i, pnum, ret);
 			goto free_hdr;
 		}
@@ -1044,8 +1031,9 @@
 	fmsb2->data_crc = 0;
 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
 	if (crc != tmp_crc) {
-		ubi_err("fastmap data CRC is invalid");
-		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+		ubi_err(ubi, "fastmap data CRC is invalid");
+		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
+			tmp_crc, crc);
 		ret = UBI_BAD_FASTMAP;
 		goto free_hdr;
 	}
@@ -1081,17 +1069,18 @@
 	ubi->fm = fm;
 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
-	ubi_msg("attached by fastmap");
-	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
-	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+	ubi_msg(ubi, "attached by fastmap");
+	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
+	ubi_msg(ubi, "fastmap WL pool size: %d",
+		ubi->fm_wl_pool.max_size);
 	ubi->fm_disabled = 0;
 
 	ubi_free_vid_hdr(ubi, vh);
 	kfree(ech);
 out:
-	mutex_unlock(&ubi->fm_mutex);
+	up_write(&ubi->fm_protect);
 	if (ret == UBI_BAD_FASTMAP)
-		ubi_err("Attach by fastmap failed, doing a full scan!");
+		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
 	return ret;
 
 free_hdr:
@@ -1117,17 +1106,18 @@
 	void *fm_raw;
 	struct ubi_fm_sb *fmsb;
 	struct ubi_fm_hdr *fmh;
-	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
 	struct ubi_fm_ec *fec;
 	struct ubi_fm_volhdr *fvh;
 	struct ubi_fm_eba *feba;
-	struct rb_node *node;
 	struct ubi_wl_entry *wl_e;
 	struct ubi_volume *vol;
 	struct ubi_vid_hdr *avhdr, *dvhdr;
 	struct ubi_work *ubi_wrk;
+	struct rb_node *tmp_rb;
 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
 	int scrub_peb_count, erase_peb_count;
+	int *seen_pebs = NULL;
 
 	fm_raw = ubi->fm_buf;
 	memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1144,6 +1134,12 @@
 		goto out_kfree;
 	}
 
+	seen_pebs = init_seen(ubi);
+	if (IS_ERR(seen_pebs)) {
+		ret = PTR_ERR(seen_pebs);
+		goto out_kfree;
+	}
+
 	spin_lock(&ubi->volumes_lock);
 	spin_lock(&ubi->wl_lock);
 
@@ -1168,29 +1164,33 @@
 	erase_peb_count = 0;
 	vol_count = 0;
 
-	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
-	fm_pos += sizeof(*fmpl1);
-	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
-	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
-	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl);
+	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
+	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
 
-	for (i = 0; i < ubi->fm_pool.size; i++)
-		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+	for (i = 0; i < ubi->fm_pool.size; i++) {
+		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
+	}
 
-	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
-	fm_pos += sizeof(*fmpl2);
-	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
-	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
-	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl_wl);
+	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
+	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
 
-	for (i = 0; i < ubi->fm_wl_pool.size; i++)
-		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
+		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
+	}
 
-	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
-		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 
 		fec->pnum = cpu_to_be32(wl_e->pnum);
+		set_seen(ubi, wl_e->pnum, seen_pebs);
 		fec->ec = cpu_to_be32(wl_e->ec);
 
 		free_peb_count++;
@@ -1199,11 +1199,23 @@
 	}
 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
-	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
-		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 
 		fec->pnum = cpu_to_be32(wl_e->pnum);
+		set_seen(ubi, wl_e->pnum, seen_pebs);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		used_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
+
+	ubi_for_each_protected_peb(ubi, i, wl_e) {
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		set_seen(ubi, wl_e->pnum, seen_pebs);
 		fec->ec = cpu_to_be32(wl_e->ec);
 
 		used_peb_count++;
@@ -1212,11 +1224,11 @@
 	}
 	fmh->used_peb_count = cpu_to_be32(used_peb_count);
 
-	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
-		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 
 		fec->pnum = cpu_to_be32(wl_e->pnum);
+		set_seen(ubi, wl_e->pnum, seen_pebs);
 		fec->ec = cpu_to_be32(wl_e->ec);
 
 		scrub_peb_count++;
@@ -1234,6 +1246,7 @@
 			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 
 			fec->pnum = cpu_to_be32(wl_e->pnum);
+			set_seen(ubi, wl_e->pnum, seen_pebs);
 			fec->ec = cpu_to_be32(wl_e->ec);
 
 			erase_peb_count++;
@@ -1287,12 +1300,13 @@
 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
 	if (ret) {
-		ubi_err("unable to write vid_hdr to fastmap SB!");
+		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
 		goto out_kfree;
 	}
 
 	for (i = 0; i < new_fm->used_blocks; i++) {
 		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
 		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
 	}
 
@@ -1307,7 +1321,7 @@
 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
 		if (ret) {
-			ubi_err("unable to write vid_hdr to PEB %i!",
+			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
 				new_fm->e[i]->pnum);
 			goto out_kfree;
 		}
@@ -1317,7 +1331,7 @@
 		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
 			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
 		if (ret) {
-			ubi_err("unable to write fastmap to PEB %i!",
+			ubi_err(ubi, "unable to write fastmap to PEB %i!",
 				new_fm->e[i]->pnum);
 			goto out_kfree;
 		}
@@ -1326,11 +1340,13 @@
 	ubi_assert(new_fm);
 	ubi->fm = new_fm;
 
+	ret = self_check_seen(ubi, seen_pebs);
 	dbg_bld("fastmap written!");
 
 out_kfree:
 	ubi_free_vid_hdr(ubi, avhdr);
 	ubi_free_vid_hdr(ubi, dvhdr);
+	free_seen(seen_pebs);
 out:
 	return ret;
 }
@@ -1385,31 +1401,87 @@
 /**
  * invalidate_fastmap - destroys a fastmap.
  * @ubi: UBI device object
- * @fm: the fastmap to be destroyed
  *
+ * This function ensures that upon next UBI attach a full scan
+ * is issued. We need this if UBI is about to write a new fastmap
+ * but is unable to do so. In this case we have two options:
+ * a) Make sure that the current fastmap will not be used upon
+ * attach time and continue, or b) fall back to RO mode to have the
+ * current fastmap in a valid state.
  * Returns 0 on success, < 0 indicates an internal error.
  */
-static int invalidate_fastmap(struct ubi_device *ubi,
-			      struct ubi_fastmap_layout *fm)
+static int invalidate_fastmap(struct ubi_device *ubi)
 {
 	int ret;
-	struct ubi_vid_hdr *vh;
+	struct ubi_fastmap_layout *fm;
+	struct ubi_wl_entry *e;
+	struct ubi_vid_hdr *vh = NULL;
 
-	ret = erase_block(ubi, fm->e[0]->pnum);
-	if (ret < 0)
-		return ret;
+	if (!ubi->fm)
+		return 0;
+
+	ubi->fm = NULL;
+
+	ret = -ENOMEM;
+	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+	if (!fm)
+		goto out;
 
 	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
 	if (!vh)
-		return -ENOMEM;
+		goto out_free_fm;
 
-	/* deleting the current fastmap SB is not enough, an old SB may exist,
-	 * so create a (corrupted) SB such that fastmap will find it and fall
-	 * back to scanning mode in any case */
+	ret = -ENOSPC;
+	e = ubi_wl_get_fm_peb(ubi, 1);
+	if (!e)
+		goto out_free_fm;
+
+	/*
+	 * Create fake fastmap such that UBI will fall back
+	 * to scanning mode.
+	 */
 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
+	if (ret < 0) {
+		ubi_wl_put_fm_peb(ubi, e, 0, 0);
+		goto out_free_fm;
+	}
 
+	fm->used_blocks = 1;
+	fm->e[0] = e;
+
+	ubi->fm = fm;
+
+out:
+	ubi_free_vid_hdr(ubi, vh);
 	return ret;
+
+out_free_fm:
+	kfree(fm);
+	goto out;
+}
+
+/**
+ * return_fm_pebs - returns all PEBs used by a fastmap back to the
+ * WL sub-system.
+ * @ubi: UBI device object
+ * @fm: fastmap layout object
+ */
+static void return_fm_pebs(struct ubi_device *ubi,
+			   struct ubi_fastmap_layout *fm)
+{
+	int i;
+
+	if (!fm)
+		return;
+
+	for (i = 0; i < fm->used_blocks; i++) {
+		if (fm->e[i]) {
+			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
+					  fm->to_be_tortured[i]);
+			fm->e[i] = NULL;
+		}
+	}
 }
 
 /**
@@ -1421,50 +1493,37 @@
  */
 int ubi_update_fastmap(struct ubi_device *ubi)
 {
-	int ret, i;
+	int ret, i, j;
 	struct ubi_fastmap_layout *new_fm, *old_fm;
 	struct ubi_wl_entry *tmp_e;
 
-	mutex_lock(&ubi->fm_mutex);
+	down_write(&ubi->fm_protect);
 
 	ubi_refill_pools(ubi);
 
 	if (ubi->ro_mode || ubi->fm_disabled) {
-		mutex_unlock(&ubi->fm_mutex);
+		up_write(&ubi->fm_protect);
 		return 0;
 	}
 
 	ret = ubi_ensure_anchor_pebs(ubi);
 	if (ret) {
-		mutex_unlock(&ubi->fm_mutex);
+		up_write(&ubi->fm_protect);
 		return ret;
 	}
 
 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
 	if (!new_fm) {
-		mutex_unlock(&ubi->fm_mutex);
+		up_write(&ubi->fm_protect);
 		return -ENOMEM;
 	}
 
 	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
-
-	for (i = 0; i < new_fm->used_blocks; i++) {
-		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-		if (!new_fm->e[i]) {
-			while (i--)
-				kfree(new_fm->e[i]);
-
-			kfree(new_fm);
-			mutex_unlock(&ubi->fm_mutex);
-			return -ENOMEM;
-		}
-	}
-
 	old_fm = ubi->fm;
 	ubi->fm = NULL;
 
 	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
-		ubi_err("fastmap too large");
+		ubi_err(ubi, "fastmap too large");
 		ret = -ENOSPC;
 		goto err;
 	}
@@ -1474,37 +1533,49 @@
 		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
 		spin_unlock(&ubi->wl_lock);
 
-		if (!tmp_e && !old_fm) {
-			int j;
-			ubi_err("could not get any free erase block");
+		if (!tmp_e) {
+			if (old_fm && old_fm->e[i]) {
+				ret = erase_block(ubi, old_fm->e[i]->pnum);
+				if (ret < 0) {
+					ubi_err(ubi, "could not erase old fastmap PEB");
 
-			for (j = 1; j < i; j++)
-				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+					for (j = 1; j < i; j++) {
+						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+								  j, 0);
+						new_fm->e[j] = NULL;
+					}
+					goto err;
+				}
+				new_fm->e[i] = old_fm->e[i];
+				old_fm->e[i] = NULL;
+			} else {
+				ubi_err(ubi, "could not get any free erase block");
 
-			ret = -ENOSPC;
-			goto err;
-		} else if (!tmp_e && old_fm) {
-			ret = erase_block(ubi, old_fm->e[i]->pnum);
-			if (ret < 0) {
-				int j;
+				for (j = 1; j < i; j++) {
+					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+					new_fm->e[j] = NULL;
+				}
 
-				for (j = 1; j < i; j++)
-					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
-							  j, 0);
-
-				ubi_err("could not erase old fastmap PEB");
+				ret = -ENOSPC;
 				goto err;
 			}
-
-			new_fm->e[i]->pnum = old_fm->e[i]->pnum;
-			new_fm->e[i]->ec = old_fm->e[i]->ec;
 		} else {
-			new_fm->e[i]->pnum = tmp_e->pnum;
-			new_fm->e[i]->ec = tmp_e->ec;
+			new_fm->e[i] = tmp_e;
 
-			if (old_fm)
+			if (old_fm && old_fm->e[i]) {
 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
 						  old_fm->to_be_tortured[i]);
+				old_fm->e[i] = NULL;
+			}
+		}
+	}
+
+	/* Old fastmap is larger than the new one */
+	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
+		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
+			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+					  old_fm->to_be_tortured[i]);
+			old_fm->e[i] = NULL;
 		}
 	}
 
@@ -1517,67 +1588,67 @@
 		if (!tmp_e) {
 			ret = erase_block(ubi, old_fm->e[0]->pnum);
 			if (ret < 0) {
-				int i;
-				ubi_err("could not erase old anchor PEB");
+				ubi_err(ubi, "could not erase old anchor PEB");
 
-				for (i = 1; i < new_fm->used_blocks; i++)
+				for (i = 1; i < new_fm->used_blocks; i++) {
 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
 							  i, 0);
+					new_fm->e[i] = NULL;
+				}
 				goto err;
 			}
-
-			new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+			new_fm->e[0] = old_fm->e[0];
 			new_fm->e[0]->ec = ret;
+			old_fm->e[0] = NULL;
 		} else {
 			/* we've got a new anchor PEB, return the old one */
 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
 					  old_fm->to_be_tortured[0]);
-
-			new_fm->e[0]->pnum = tmp_e->pnum;
-			new_fm->e[0]->ec = tmp_e->ec;
+			new_fm->e[0] = tmp_e;
+			old_fm->e[0] = NULL;
 		}
 	} else {
 		if (!tmp_e) {
-			int i;
-			ubi_err("could not find any anchor PEB");
+			ubi_err(ubi, "could not find any anchor PEB");
 
-			for (i = 1; i < new_fm->used_blocks; i++)
+			for (i = 1; i < new_fm->used_blocks; i++) {
 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+				new_fm->e[i] = NULL;
+			}
 
 			ret = -ENOSPC;
 			goto err;
 		}
-
-		new_fm->e[0]->pnum = tmp_e->pnum;
-		new_fm->e[0]->ec = tmp_e->ec;
+		new_fm->e[0] = tmp_e;
 	}
 
 	down_write(&ubi->work_sem);
-	down_write(&ubi->fm_sem);
+	down_write(&ubi->fm_eba_sem);
 	ret = ubi_write_fastmap(ubi, new_fm);
-	up_write(&ubi->fm_sem);
+	up_write(&ubi->fm_eba_sem);
 	up_write(&ubi->work_sem);
 
 	if (ret)
 		goto err;
 
 out_unlock:
-	mutex_unlock(&ubi->fm_mutex);
+	up_write(&ubi->fm_protect);
 	kfree(old_fm);
 	return ret;
 
 err:
-	kfree(new_fm);
+	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
 
-	ubi_warn("Unable to write new fastmap, err=%i", ret);
-
-	ret = 0;
-	if (old_fm) {
-		ret = invalidate_fastmap(ubi, old_fm);
-		if (ret < 0)
-			ubi_err("Unable to invalidiate current fastmap!");
-		else if (ret)
-			ret = 0;
+	ret = invalidate_fastmap(ubi);
+	if (ret < 0) {
+		ubi_err(ubi, "Unable to invalidiate current fastmap!");
+		ubi_ro_mode(ubi);
+	} else {
+		return_fm_pebs(ubi, old_fm);
+		return_fm_pebs(ubi, new_fm);
+		ret = 0;
 	}
+
+	kfree(new_fm);
 	goto out_unlock;
 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 0e2e933..d1bdec3 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -169,19 +169,20 @@
 			 * enabled. A corresponding message will be printed
 			 * later, when it is has been scrubbed.
 			 */
-			ubi_msg("fixable bit-flip detected at PEB %d", pnum);
+			ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
+				pnum);
 			ubi_assert(len == read);
 			return UBI_IO_BITFLIPS;
 		}
 
 		if (retries++ < UBI_IO_RETRIES) {
-			ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+			ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
 				 err, errstr, len, pnum, offset, read);
 			yield();
 			goto retry;
 		}
 
-		ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+		ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
 			err, errstr, len, pnum, offset, read);
 		dump_stack();
 
@@ -238,7 +239,7 @@
 	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
 
 	if (ubi->ro_mode) {
-		ubi_err("read-only mode");
+		ubi_err(ubi, "read-only mode");
 		return -EROFS;
 	}
 
@@ -265,7 +266,7 @@
 	}
 
 	if (ubi_dbg_is_write_failure(ubi)) {
-		ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+		ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
 			len, pnum, offset);
 		dump_stack();
 		return -EIO;
@@ -274,7 +275,7 @@
 	addr = (loff_t)pnum * ubi->peb_size + offset;
 	err = mtd_write(ubi->mtd, addr, len, &written, buf);
 	if (err) {
-		ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+		ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
 			err, len, pnum, offset, written);
 		dump_stack();
 		ubi_dump_flash(ubi, pnum, offset, len);
@@ -330,7 +331,7 @@
 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
 
 	if (ubi->ro_mode) {
-		ubi_err("read-only mode");
+		ubi_err(ubi, "read-only mode");
 		return -EROFS;
 	}
 
@@ -347,12 +348,12 @@
 	err = mtd_erase(ubi->mtd, &ei);
 	if (err) {
 		if (retries++ < UBI_IO_RETRIES) {
-			ubi_warn("error %d while erasing PEB %d, retry",
+			ubi_warn(ubi, "error %d while erasing PEB %d, retry",
 				 err, pnum);
 			yield();
 			goto retry;
 		}
-		ubi_err("cannot erase PEB %d, error %d", pnum, err);
+		ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
 		dump_stack();
 		return err;
 	}
@@ -360,17 +361,18 @@
 	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
 					   ei.state == MTD_ERASE_FAILED);
 	if (err) {
-		ubi_err("interrupted PEB %d erasure", pnum);
+		ubi_err(ubi, "interrupted PEB %d erasure", pnum);
 		return -EINTR;
 	}
 
 	if (ei.state == MTD_ERASE_FAILED) {
 		if (retries++ < UBI_IO_RETRIES) {
-			ubi_warn("error while erasing PEB %d, retry", pnum);
+			ubi_warn(ubi, "error while erasing PEB %d, retry",
+				 pnum);
 			yield();
 			goto retry;
 		}
-		ubi_err("cannot erase PEB %d", pnum);
+		ubi_err(ubi, "cannot erase PEB %d", pnum);
 		dump_stack();
 		return -EIO;
 	}
@@ -380,7 +382,7 @@
 		return err;
 
 	if (ubi_dbg_is_erase_failure(ubi)) {
-		ubi_err("cannot erase PEB %d (emulated)", pnum);
+		ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
 		return -EIO;
 	}
 
@@ -403,7 +405,7 @@
 {
 	int err, i, patt_count;
 
-	ubi_msg("run torture test for PEB %d", pnum);
+	ubi_msg(ubi, "run torture test for PEB %d", pnum);
 	patt_count = ARRAY_SIZE(patterns);
 	ubi_assert(patt_count > 0);
 
@@ -420,7 +422,7 @@
 
 		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
 		if (err == 0) {
-			ubi_err("erased PEB %d, but a non-0xFF byte found",
+			ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
 				pnum);
 			err = -EIO;
 			goto out;
@@ -440,7 +442,7 @@
 		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
 					ubi->peb_size);
 		if (err == 0) {
-			ubi_err("pattern %x checking failed for PEB %d",
+			ubi_err(ubi, "pattern %x checking failed for PEB %d",
 				patterns[i], pnum);
 			err = -EIO;
 			goto out;
@@ -448,7 +450,7 @@
 	}
 
 	err = patt_count;
-	ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
+	ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
 
 out:
 	mutex_unlock(&ubi->buf_mutex);
@@ -458,7 +460,7 @@
 		 * has not passed because it happened on a freshly erased
 		 * physical eraseblock which means something is wrong with it.
 		 */
-		ubi_err("read problems on freshly erased PEB %d, must be bad",
+		ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
 			pnum);
 		err = -EIO;
 	}
@@ -534,7 +536,7 @@
 	 * it. Supposedly the flash media or the driver is screwed up, so
 	 * return an error.
 	 */
-	ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
+	ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
 	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
 	return -EIO;
 }
@@ -566,7 +568,7 @@
 		return err;
 
 	if (ubi->ro_mode) {
-		ubi_err("read-only mode");
+		ubi_err(ubi, "read-only mode");
 		return -EROFS;
 	}
 
@@ -608,7 +610,7 @@
 
 		ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
 		if (ret < 0)
-			ubi_err("error %d while checking if PEB %d is bad",
+			ubi_err(ubi, "error %d while checking if PEB %d is bad",
 				ret, pnum);
 		else if (ret)
 			dbg_io("PEB %d is bad", pnum);
@@ -634,7 +636,7 @@
 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
 
 	if (ubi->ro_mode) {
-		ubi_err("read-only mode");
+		ubi_err(ubi, "read-only mode");
 		return -EROFS;
 	}
 
@@ -643,7 +645,7 @@
 
 	err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
 	if (err)
-		ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
+		ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
 	return err;
 }
 
@@ -666,32 +668,32 @@
 	leb_start = be32_to_cpu(ec_hdr->data_offset);
 
 	if (ec_hdr->version != UBI_VERSION) {
-		ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
+		ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
 			UBI_VERSION, (int)ec_hdr->version);
 		goto bad;
 	}
 
 	if (vid_hdr_offset != ubi->vid_hdr_offset) {
-		ubi_err("bad VID header offset %d, expected %d",
+		ubi_err(ubi, "bad VID header offset %d, expected %d",
 			vid_hdr_offset, ubi->vid_hdr_offset);
 		goto bad;
 	}
 
 	if (leb_start != ubi->leb_start) {
-		ubi_err("bad data offset %d, expected %d",
+		ubi_err(ubi, "bad data offset %d, expected %d",
 			leb_start, ubi->leb_start);
 		goto bad;
 	}
 
 	if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
-		ubi_err("bad erase counter %lld", ec);
+		ubi_err(ubi, "bad erase counter %lld", ec);
 		goto bad;
 	}
 
 	return 0;
 
 bad:
-	ubi_err("bad EC header");
+	ubi_err(ubi, "bad EC header");
 	ubi_dump_ec_hdr(ec_hdr);
 	dump_stack();
 	return 1;
@@ -757,7 +759,7 @@
 		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
 			/* The physical eraseblock is supposedly empty */
 			if (verbose)
-				ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+				ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
 					 pnum);
 			dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
 				pnum);
@@ -772,7 +774,7 @@
 		 * 0xFF bytes. Report that the header is corrupted.
 		 */
 		if (verbose) {
-			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
 				 pnum, magic, UBI_EC_HDR_MAGIC);
 			ubi_dump_ec_hdr(ec_hdr);
 		}
@@ -786,7 +788,7 @@
 
 	if (hdr_crc != crc) {
 		if (verbose) {
-			ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+			ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
 				 pnum, crc, hdr_crc);
 			ubi_dump_ec_hdr(ec_hdr);
 		}
@@ -802,7 +804,7 @@
 	/* And of course validate what has just been read from the media */
 	err = validate_ec_hdr(ubi, ec_hdr);
 	if (err) {
-		ubi_err("validation failed for PEB %d", pnum);
+		ubi_err(ubi, "validation failed for PEB %d", pnum);
 		return -EINVAL;
 	}
 
@@ -849,6 +851,9 @@
 	if (err)
 		return err;
 
+	if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+		return -EROFS;
+
 	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
 	return err;
 }
@@ -876,40 +881,40 @@
 	int usable_leb_size = ubi->leb_size - data_pad;
 
 	if (copy_flag != 0 && copy_flag != 1) {
-		ubi_err("bad copy_flag");
+		ubi_err(ubi, "bad copy_flag");
 		goto bad;
 	}
 
 	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
 	    data_pad < 0) {
-		ubi_err("negative values");
+		ubi_err(ubi, "negative values");
 		goto bad;
 	}
 
 	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
-		ubi_err("bad vol_id");
+		ubi_err(ubi, "bad vol_id");
 		goto bad;
 	}
 
 	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
-		ubi_err("bad compat");
+		ubi_err(ubi, "bad compat");
 		goto bad;
 	}
 
 	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
 	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
 	    compat != UBI_COMPAT_REJECT) {
-		ubi_err("bad compat");
+		ubi_err(ubi, "bad compat");
 		goto bad;
 	}
 
 	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
-		ubi_err("bad vol_type");
+		ubi_err(ubi, "bad vol_type");
 		goto bad;
 	}
 
 	if (data_pad >= ubi->leb_size / 2) {
-		ubi_err("bad data_pad");
+		ubi_err(ubi, "bad data_pad");
 		goto bad;
 	}
 
@@ -921,45 +926,45 @@
 		 * mapped logical eraseblocks.
 		 */
 		if (used_ebs == 0) {
-			ubi_err("zero used_ebs");
+			ubi_err(ubi, "zero used_ebs");
 			goto bad;
 		}
 		if (data_size == 0) {
-			ubi_err("zero data_size");
+			ubi_err(ubi, "zero data_size");
 			goto bad;
 		}
 		if (lnum < used_ebs - 1) {
 			if (data_size != usable_leb_size) {
-				ubi_err("bad data_size");
+				ubi_err(ubi, "bad data_size");
 				goto bad;
 			}
 		} else if (lnum == used_ebs - 1) {
 			if (data_size == 0) {
-				ubi_err("bad data_size at last LEB");
+				ubi_err(ubi, "bad data_size at last LEB");
 				goto bad;
 			}
 		} else {
-			ubi_err("too high lnum");
+			ubi_err(ubi, "too high lnum");
 			goto bad;
 		}
 	} else {
 		if (copy_flag == 0) {
 			if (data_crc != 0) {
-				ubi_err("non-zero data CRC");
+				ubi_err(ubi, "non-zero data CRC");
 				goto bad;
 			}
 			if (data_size != 0) {
-				ubi_err("non-zero data_size");
+				ubi_err(ubi, "non-zero data_size");
 				goto bad;
 			}
 		} else {
 			if (data_size == 0) {
-				ubi_err("zero data_size of copy");
+				ubi_err(ubi, "zero data_size of copy");
 				goto bad;
 			}
 		}
 		if (used_ebs != 0) {
-			ubi_err("bad used_ebs");
+			ubi_err(ubi, "bad used_ebs");
 			goto bad;
 		}
 	}
@@ -967,7 +972,7 @@
 	return 0;
 
 bad:
-	ubi_err("bad VID header");
+	ubi_err(ubi, "bad VID header");
 	ubi_dump_vid_hdr(vid_hdr);
 	dump_stack();
 	return 1;
@@ -1012,7 +1017,7 @@
 
 		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
 			if (verbose)
-				ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+				ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
 					 pnum);
 			dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
 				pnum);
@@ -1023,7 +1028,7 @@
 		}
 
 		if (verbose) {
-			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
 				 pnum, magic, UBI_VID_HDR_MAGIC);
 			ubi_dump_vid_hdr(vid_hdr);
 		}
@@ -1037,7 +1042,7 @@
 
 	if (hdr_crc != crc) {
 		if (verbose) {
-			ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+			ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
 				 pnum, crc, hdr_crc);
 			ubi_dump_vid_hdr(vid_hdr);
 		}
@@ -1051,7 +1056,7 @@
 
 	err = validate_vid_hdr(ubi, vid_hdr);
 	if (err) {
-		ubi_err("validation failed for PEB %d", pnum);
+		ubi_err(ubi, "validation failed for PEB %d", pnum);
 		return -EINVAL;
 	}
 
@@ -1096,6 +1101,9 @@
 	if (err)
 		return err;
 
+	if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+		return -EROFS;
+
 	p = (char *)vid_hdr - ubi->vid_hdr_shift;
 	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			   ubi->vid_hdr_alsize);
@@ -1121,7 +1129,7 @@
 	if (!err)
 		return err;
 
-	ubi_err("self-check failed for PEB %d", pnum);
+	ubi_err(ubi, "self-check failed for PEB %d", pnum);
 	dump_stack();
 	return err > 0 ? -EINVAL : err;
 }
@@ -1146,14 +1154,14 @@
 
 	magic = be32_to_cpu(ec_hdr->magic);
 	if (magic != UBI_EC_HDR_MAGIC) {
-		ubi_err("bad magic %#08x, must be %#08x",
+		ubi_err(ubi, "bad magic %#08x, must be %#08x",
 			magic, UBI_EC_HDR_MAGIC);
 		goto fail;
 	}
 
 	err = validate_ec_hdr(ubi, ec_hdr);
 	if (err) {
-		ubi_err("self-check failed for PEB %d", pnum);
+		ubi_err(ubi, "self-check failed for PEB %d", pnum);
 		goto fail;
 	}
 
@@ -1193,8 +1201,9 @@
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
 	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
 	if (hdr_crc != crc) {
-		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
-		ubi_err("self-check failed for PEB %d", pnum);
+		ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
+			crc, hdr_crc);
+		ubi_err(ubi, "self-check failed for PEB %d", pnum);
 		ubi_dump_ec_hdr(ec_hdr);
 		dump_stack();
 		err = -EINVAL;
@@ -1228,21 +1237,21 @@
 
 	magic = be32_to_cpu(vid_hdr->magic);
 	if (magic != UBI_VID_HDR_MAGIC) {
-		ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
+		ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
 			magic, pnum, UBI_VID_HDR_MAGIC);
 		goto fail;
 	}
 
 	err = validate_vid_hdr(ubi, vid_hdr);
 	if (err) {
-		ubi_err("self-check failed for PEB %d", pnum);
+		ubi_err(ubi, "self-check failed for PEB %d", pnum);
 		goto fail;
 	}
 
 	return err;
 
 fail:
-	ubi_err("self-check failed for PEB %d", pnum);
+	ubi_err(ubi, "self-check failed for PEB %d", pnum);
 	ubi_dump_vid_hdr(vid_hdr);
 	dump_stack();
 	return -EINVAL;
@@ -1280,9 +1289,9 @@
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
 	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
 	if (hdr_crc != crc) {
-		ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+		ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
 			pnum, crc, hdr_crc);
-		ubi_err("self-check failed for PEB %d", pnum);
+		ubi_err(ubi, "self-check failed for PEB %d", pnum);
 		ubi_dump_vid_hdr(vid_hdr);
 		dump_stack();
 		err = -EINVAL;
@@ -1321,7 +1330,7 @@
 
 	buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
 	if (!buf1) {
-		ubi_err("cannot allocate memory to check writes");
+		ubi_err(ubi, "cannot allocate memory to check writes");
 		return 0;
 	}
 
@@ -1339,14 +1348,15 @@
 		if (c == c1)
 			continue;
 
-		ubi_err("self-check failed for PEB %d:%d, len %d",
+		ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
 			pnum, offset, len);
-		ubi_msg("data differ at position %d", i);
-		ubi_msg("hex dump of the original buffer from %d to %d",
+		ubi_msg(ubi, "data differ at position %d", i);
+		dump_len = max_t(int, 128, len - i);
+		ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
 			i, i + dump_len);
 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
 			       buf + i, dump_len, 1);
-		ubi_msg("hex dump of the read buffer from %d to %d",
+		ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
 			i, i + dump_len);
 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
 			       buf1 + i, dump_len, 1);
@@ -1386,20 +1396,20 @@
 
 	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
 	if (!buf) {
-		ubi_err("cannot allocate memory to check for 0xFFs");
+		ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
 		return 0;
 	}
 
 	err = mtd_read(ubi->mtd, addr, len, &read, buf);
 	if (err && !mtd_is_bitflip(err)) {
-		ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+		ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
 			err, len, pnum, offset, read);
 		goto error;
 	}
 
 	err = ubi_check_pattern(buf, 0xFF, len);
 	if (err == 0) {
-		ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+		ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
 			pnum, offset, len);
 		goto fail;
 	}
@@ -1408,8 +1418,8 @@
 	return 0;
 
 fail:
-	ubi_err("self-check failed for PEB %d", pnum);
-	ubi_msg("hex dump of the %d-%d region", offset, offset + len);
+	ubi_err(ubi, "self-check failed for PEB %d", pnum);
+	ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
 	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
 	err = -EINVAL;
 error:
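
The io.c hunks above also wire the new power-cut emulation into the VID header write path: when the POWER_CUT_VID_WRITE knob fires, the write is aborted with -EROFS before anything reaches the flash, which lets test setups provoke half-written headers on purpose. The following stand-alone sketch only models the countdown idea behind such a knob; the field names mirror the new ubi_debug_info members, but the helper itself and its exact semantics are assumptions, not the kernel's ubi_dbg_power_cut().

/*
 * Minimal user-space model of a power-cut emulation knob. Illustrative
 * only: dbg_power_cut() and its countdown behaviour are assumptions.
 */
#include <stdio.h>

enum { POWER_CUT_EC_WRITE = 0x01, POWER_CUT_VID_WRITE = 0x02 };

struct dbg_info {
	unsigned int emulate_power_cut;	/* bitmask of POWER_CUT_* flags */
	unsigned int power_cut_counter;	/* writes left before the emulated cut */
};

/* Return 1 if this write should be dropped to emulate a power cut. */
static int dbg_power_cut(struct dbg_info *d, int caller)
{
	if (!(d->emulate_power_cut & caller))
		return 0;
	if (d->power_cut_counter && --d->power_cut_counter)
		return 0;
	return 1;	/* the caller is expected to bail out with -EROFS */
}

int main(void)
{
	struct dbg_info d = { POWER_CUT_VID_WRITE, 3 };
	int i;

	for (i = 0; i < 5; i++)
		printf("VID write %d: %s\n", i,
		       dbg_power_cut(&d, POWER_CUT_VID_WRITE) ?
		       "emulated power cut" : "ok");
	return 0;
}

Compiled with a plain C compiler, the demo reports an emulated power cut on the third simulated VID write and on every write after it, which is roughly how a test harness can trigger interrupted-header states on demand.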
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index fd2bbd6..06b9314 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -132,7 +132,7 @@
 		return ERR_PTR(-EINVAL);
 
 	if (mode != UBI_READONLY && mode != UBI_READWRITE &&
-	    mode != UBI_EXCLUSIVE)
+	    mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
 		return ERR_PTR(-EINVAL);
 
 	/*
@@ -177,10 +177,17 @@
 		break;
 
 	case UBI_EXCLUSIVE:
-		if (vol->exclusive || vol->writers || vol->readers)
+		if (vol->exclusive || vol->writers || vol->readers ||
+		    vol->metaonly)
 			goto out_unlock;
 		vol->exclusive = 1;
 		break;
+
+	case UBI_METAONLY:
+		if (vol->metaonly || vol->exclusive)
+			goto out_unlock;
+		vol->metaonly = 1;
+		break;
 	}
 	get_device(&vol->dev);
 	vol->ref_count += 1;
@@ -199,7 +206,7 @@
 			return ERR_PTR(err);
 		}
 		if (err == 1) {
-			ubi_warn("volume %d on UBI device %d is corrupted",
+			ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
 				 vol_id, ubi->ubi_num);
 			vol->corrupted = 1;
 		}
@@ -216,7 +223,7 @@
 	kfree(desc);
 out_put_ubi:
 	ubi_put_device(ubi);
-	ubi_err("cannot open device %d, volume %d, error %d",
+	ubi_err(ubi, "cannot open device %d, volume %d, error %d",
 		ubi_num, vol_id, err);
 	return ERR_PTR(err);
 }
@@ -303,7 +310,7 @@
 	if (error)
 		return ERR_PTR(error);
 
-	inode = path.dentry->d_inode;
+	inode = d_backing_inode(path.dentry);
 	mod = inode->i_mode;
 	ubi_num = ubi_major2num(imajor(inode));
 	vol_id = iminor(inode) - 1;
@@ -340,6 +347,10 @@
 		break;
 	case UBI_EXCLUSIVE:
 		vol->exclusive = 0;
+		break;
+	case UBI_METAONLY:
+		vol->metaonly = 0;
+		break;
 	}
 	vol->ref_count -= 1;
 	spin_unlock(&ubi->volumes_lock);
@@ -352,6 +363,43 @@
 EXPORT_SYMBOL_GPL(ubi_close_volume);
 
 /**
+ * leb_read_sanity_check - does sanity checks on read requests.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ *
+ * This function is used by ubi_leb_read() and ubi_leb_read_sg()
+ * to perform sanity checks.
+ */
+static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
+				 int offset, int len)
+{
+	struct ubi_volume *vol = desc->vol;
+	struct ubi_device *ubi = vol->ubi;
+	int vol_id = vol->vol_id;
+
+	if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
+	    lnum >= vol->used_ebs || offset < 0 || len < 0 ||
+	    offset + len > vol->usable_leb_size)
+		return -EINVAL;
+
+	if (vol->vol_type == UBI_STATIC_VOLUME) {
+		if (vol->used_ebs == 0)
+			/* Empty static UBI volume */
+			return 0;
+		if (lnum == vol->used_ebs - 1 &&
+		    offset + len > vol->last_eb_bytes)
+			return -EINVAL;
+	}
+
+	if (vol->upd_marker)
+		return -EBADF;
+
+	return 0;
+}
+
+/**
  * ubi_leb_read - read data.
  * @desc: volume descriptor
  * @lnum: logical eraseblock number to read from
@@ -387,28 +435,16 @@
 
 	dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
 
-	if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
-	    lnum >= vol->used_ebs || offset < 0 || len < 0 ||
-	    offset + len > vol->usable_leb_size)
-		return -EINVAL;
+	err = leb_read_sanity_check(desc, lnum, offset, len);
+	if (err < 0)
+		return err;
 
-	if (vol->vol_type == UBI_STATIC_VOLUME) {
-		if (vol->used_ebs == 0)
-			/* Empty static UBI volume */
-			return 0;
-		if (lnum == vol->used_ebs - 1 &&
-		    offset + len > vol->last_eb_bytes)
-			return -EINVAL;
-	}
-
-	if (vol->upd_marker)
-		return -EBADF;
 	if (len == 0)
 		return 0;
 
 	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
 	if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
-		ubi_warn("mark volume %d as corrupted", vol_id);
+		ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
 		vol->corrupted = 1;
 	}
 
@@ -416,6 +452,47 @@
 }
 EXPORT_SYMBOL_GPL(ubi_leb_read);
 
+#ifndef __UBOOT__
+/**
+ * ubi_leb_read_sg - read data into a scatter gather list.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @sgl: UBI scatter gather list where to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function works exactly like ubi_leb_read(). But instead of
+ * storing the read data into a buffer it writes to an UBI scatter gather
+ * list.
+ */
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+		    int offset, int len, int check)
+{
+	struct ubi_volume *vol = desc->vol;
+	struct ubi_device *ubi = vol->ubi;
+	int err, vol_id = vol->vol_id;
+
+	dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+	err = leb_read_sanity_check(desc, lnum, offset, len);
+	if (err < 0)
+		return err;
+
+	if (len == 0)
+		return 0;
+
+	err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
+	if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+		ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+		vol->corrupted = 1;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
+#endif
+
 /**
  * ubi_leb_write - write data.
  * @desc: volume descriptor
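
Two user-visible changes land in kapi.c: ubi_open_volume() now accepts a fourth open mode, UBI_METAONLY, for users that only touch volume metadata, and the read-path checks were factored out into leb_read_sanity_check() so that the new scatter-gather reader ubi_leb_read_sg() shares them with ubi_leb_read(). A hypothetical in-kernel caller, shown only to illustrate the call shape (the device, volume and length values are made up):

/*
 * Hypothetical consumer of the kernel UBI API, for illustration only.
 * Both ubi_leb_read() and ubi_leb_read_sg() now go through
 * leb_read_sanity_check() before touching the flash.
 */
#include <linux/err.h>
#include <linux/mtd/ubi.h>

static int peek_first_leb(int ubi_num, int vol_id, char *buf, int len)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Bad LEB numbers/offsets fail with -EINVAL before any I/O */
	err = ubi_leb_read(desc, 0, buf, 0, len, 0);

	ubi_close_volume(desc);
	return err;
}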
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 49530b7..504865b 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -63,6 +63,8 @@
 	for (i = 0; i < vol->used_ebs; i++) {
 		int size;
 
+		cond_resched();
+
 		if (i == vol->used_ebs - 1)
 			size = vol->last_eb_bytes;
 		else
@@ -100,7 +102,7 @@
 	ubi->avail_pebs -= need;
 	ubi->rsvd_pebs += need;
 	ubi->beb_rsvd_pebs += need;
-	ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+	ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
 }
 
 /**
@@ -117,7 +119,7 @@
 	ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
 	if (ubi->beb_rsvd_level < 0) {
 		ubi->beb_rsvd_level = 0;
-		ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+		ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
 			 ubi->bad_peb_count, ubi->bad_peb_limit);
 	}
 }
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 2809805..83345cd 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -395,8 +395,6 @@
 #define UBI_FM_MIN_POOL_SIZE	8
 #define UBI_FM_MAX_POOL_SIZE	256
 
-#define UBI_FM_WL_POOL_SIZE	25
-
 /**
  * struct ubi_fm_sb - UBI fastmap super block
  * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 754b337..540f721 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -43,17 +43,18 @@
 
 /* Normal UBI messages */
 #ifdef CONFIG_UBI_SILENCE_MSG
-#define ubi_msg(fmt, ...)
+#define ubi_msg(ubi, fmt, ...)
 #else
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(ubi, fmt, ...) printk(UBI_NAME_STR "%d: " fmt "\n", \
+					 ubi->ubi_num, ##__VA_ARGS__)
 #endif
 
 /* UBI warning messages */
-#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n",  \
-				   __func__, ##__VA_ARGS__)
+#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
+					ubi->ubi_num, __func__, ##__VA_ARGS__)
 /* UBI error messages */
-#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n",      \
-				 __func__, ##__VA_ARGS__)
+#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
+				      ubi->ubi_num, __func__, ##__VA_ARGS__)
 
 /* Background thread name pattern */
 #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
@@ -147,6 +148,17 @@
 	UBI_BAD_FASTMAP,
 };
 
+/*
+ * Flags for emulate_power_cut in ubi_debug_info
+ *
+ * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
+ * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
+ */
+enum {
+	POWER_CUT_EC_WRITE = 0x01,
+	POWER_CUT_VID_WRITE = 0x02,
+};
+
 /**
  * struct ubi_wl_entry - wear-leveling entry.
  * @u.rb: link in the corresponding (free/used) RB-tree
@@ -257,6 +269,7 @@
  * @readers: number of users holding this volume in read-only mode
  * @writers: number of users holding this volume in read-write mode
  * @exclusive: whether somebody holds this volume in exclusive mode
+ * @metaonly: whether somebody is altering only meta data of this volume
  *
  * @reserved_pebs: how many physical eraseblocks are reserved for this volume
  * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@@ -305,6 +318,7 @@
 	int readers;
 	int writers;
 	int exclusive;
+	int metaonly;
 
 	int reserved_pebs;
 	int vol_type;
@@ -339,7 +353,8 @@
 /**
  * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
  * @vol: reference to the corresponding volume description object
- * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
+ * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE
+ * or %UBI_METAONLY)
  */
 struct ubi_volume_desc {
 	struct ubi_volume *vol;
@@ -353,30 +368,48 @@
  *
  * @chk_gen: if UBI general extra checks are enabled
  * @chk_io: if UBI I/O extra checks are enabled
+ * @chk_fastmap: if UBI fastmap extra checks are enabled
  * @disable_bgt: disable the background task for testing purposes
  * @emulate_bitflips: emulate bit-flips for testing purposes
  * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @emulate_power_cut: emulate power cut for testing purposes
+ * @power_cut_counter: count down for writes left until emulated power cut
+ * @power_cut_min: minimum number of writes before emulating a power cut
+ * @power_cut_max: maximum number of writes until emulating a power cut
  * @dfs_dir_name: name of debugfs directory containing files of this UBI device
  * @dfs_dir: direntry object of the UBI device debugfs directory
  * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
  * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
  * @dfs_disable_bgt: debugfs knob to disable the background task
  * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
  * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
+ * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
+ * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
  */
 struct ubi_debug_info {
 	unsigned int chk_gen:1;
 	unsigned int chk_io:1;
+	unsigned int chk_fastmap:1;
 	unsigned int disable_bgt:1;
 	unsigned int emulate_bitflips:1;
 	unsigned int emulate_io_failures:1;
+	unsigned int emulate_power_cut:2;
+	unsigned int power_cut_counter;
+	unsigned int power_cut_min;
+	unsigned int power_cut_max;
 	char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
 	struct dentry *dfs_dir;
 	struct dentry *dfs_chk_gen;
 	struct dentry *dfs_chk_io;
+	struct dentry *dfs_chk_fastmap;
 	struct dentry *dfs_disable_bgt;
 	struct dentry *dfs_emulate_bitflips;
 	struct dentry *dfs_emulate_io_failures;
+	struct dentry *dfs_emulate_power_cut;
+	struct dentry *dfs_power_cut_min;
+	struct dentry *dfs_power_cut_max;
 };
 
 /**
@@ -390,7 +423,8 @@
  * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
  *                @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
  *                @vol->readers, @vol->writers, @vol->exclusive,
- *                @vol->ref_count, @vol->mapping and @vol->eba_tbl.
+ *                @vol->metaonly, @vol->ref_count, @vol->mapping and
+ *                @vol->eba_tbl.
  * @ref_count: count of references on the UBI device
  * @image_seq: image sequence number recorded on EC headers
  *
@@ -422,11 +456,13 @@
  * @fm_pool: in-memory data structure of the fastmap pool
  * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
  *		sub-system
- * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
+ * that critical sections cannot be interrupted by ubi_update_fastmap()
  * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
  * @fm_size: fastmap size in bytes
- * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
  * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
  *
  * @used: RB-tree of used physical eraseblocks
  * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -438,9 +474,11 @@
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
  *	     @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- *	     @erroneous, and @erroneous_peb_count fields
+ *	     @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ *	     and @fm_wl_pool fields
  * @move_mutex: serializes eraseblock moves
- * @work_sem: synchronizes the WL worker with use tasks
+ * @work_sem: used to wait for all the scheduled works to finish and prevent
+ * new works from being submitted
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
  *             physical eraseblock
@@ -474,7 +512,7 @@
  * @vid_hdr_offset: starting offset of the volume identifier header (might be
  *                  unaligned)
  * @vid_hdr_aloffset: starting offset of the VID header aligned to
- * @hdrs_min_io_size
+ *                    @hdrs_min_io_size
  * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
  * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
  *               not
@@ -527,13 +565,14 @@
 	struct ubi_fastmap_layout *fm;
 	struct ubi_fm_pool fm_pool;
 	struct ubi_fm_pool fm_wl_pool;
-	struct rw_semaphore fm_sem;
-	struct mutex fm_mutex;
+	struct rw_semaphore fm_eba_sem;
+	struct rw_semaphore fm_protect;
 	void *fm_buf;
 	size_t fm_size;
 #ifndef __UBOOT__
 	struct work_struct fm_work;
 #endif
+	int fm_work_scheduled;
 
 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
@@ -716,14 +755,15 @@
  * @torture: if the physical eraseblock has to be tortured
 * @anchor: produce an anchor PEB to be used by fastmap
  *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
+ * The @func pointer points to the worker function. If the @shutdown argument is
+ * not zero, the worker has to free the resources and exit immediately as the
+ * WL sub-system is shutting down.
+ * The worker has to return zero in case of success and a negative error code in
  * case of failure.
  */
 struct ubi_work {
 	struct list_head list;
-	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
 	/* The below fields are only relevant to erasure works */
 	struct ubi_wl_entry *e;
 	int vol_id;
@@ -738,7 +778,7 @@
 extern const struct file_operations ubi_ctrl_cdev_operations;
 extern const struct file_operations ubi_cdev_operations;
 extern const struct file_operations ubi_vol_cdev_operations;
-extern struct class *ubi_class;
+extern struct class ubi_class;
 extern struct mutex ubi_devices_mutex;
 extern struct blocking_notifier_head ubi_notifiers;
 
@@ -807,6 +847,9 @@
 		      int lnum);
 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 		     void *buf, int offset, int len, int check);
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+			struct ubi_sgl *sgl, int lnum, int offset, int len,
+			int check);
 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 		      const void *buf, int offset, int len);
 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
@@ -877,10 +920,14 @@
 		      int pnum, const struct ubi_vid_hdr *vid_hdr);
 
 /* fastmap.c */
+#ifdef CONFIG_MTD_UBI_FASTMAP
 size_t ubi_calc_fm_size(struct ubi_device *ubi);
 int ubi_update_fastmap(struct ubi_device *ubi);
 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		     int fm_anchor);
+#else
+static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
+#endif
 
 /* block.c */
 #ifdef CONFIG_MTD_UBI_BLOCK
@@ -901,6 +948,42 @@
 }
 #endif
 
+/*
+ * ubi_for_each_free_peb - walk the UBI free RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to RB-tree entry type to use as a loop counter
+ */
+#define ubi_for_each_free_peb(ubi, e, tmp_rb)	\
+	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
+
+/*
+ * ubi_for_each_used_peb - walk the UBI used RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to RB-tree entry type to use as a loop counter
+ */
+#define ubi_for_each_used_peb(ubi, e, tmp_rb)	\
+	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
+
+/*
+ * ubi_for_each_scrub_peb - walk the UBI scrub RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to RB-tree entry type to use as a loop counter
+ */
+#define ubi_for_each_scrub_peb(ubi, e, tmp_rb)	\
+	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
+
+/*
+ * ubi_for_each_protected_peb - walk the UBI protection queue.
+ * @ubi: UBI device description object
+ * @i: an integer used as a counter
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ */
+#define ubi_for_each_protected_peb(ubi, i, e)	\
+	for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++)	\
+		list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
 
 /*
  * ubi_rb_for_each_entry - walk an RB-tree.
@@ -1004,7 +1087,7 @@
 {
 	if (!ubi->ro_mode) {
 		ubi->ro_mode = 1;
-		ubi_warn("switch to read-only mode");
+		ubi_warn(ubi, "switch to read-only mode");
 		dump_stack();
 	}
 }
@@ -1035,4 +1118,7 @@
 		return idx;
 }
 
+#ifdef __UBOOT__
+int do_work(struct ubi_device *ubi);
+#endif
 #endif /* !__UBI_UBI_H__ */
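
The most visible interface change in ubi.h is that ubi_msg(), ubi_warn() and ubi_err() now take the UBI device as their first argument, so every message carries the device number instead of a bare "UBI:" prefix; that is why nearly every hunk in this patch touches a log call. A minimal user-space model of the new convention (struct ubi_device and UBI_NAME_STR are stand-ins here, not the real definitions):

/* Stand-alone model of the reworked message macros, illustrative only. */
#include <stdio.h>

#define UBI_NAME_STR "ubi"

struct ubi_device { int ubi_num; };

#define ubi_err(ubi, fmt, ...) \
	fprintf(stderr, UBI_NAME_STR "%d error: %s: " fmt "\n", \
		(ubi)->ubi_num, __func__, ##__VA_ARGS__)

int main(void)
{
	struct ubi_device dev = { .ubi_num = 2 };

	ubi_err(&dev, "self-check failed for PEB %d", 17);
	/* prints: ubi2 error: main: self-check failed for PEB 17 */
	return 0;
}

The new ubi_for_each_free_peb(), ubi_for_each_used_peb(), ubi_for_each_scrub_peb() and ubi_for_each_protected_peb() helpers wrap ubi_rb_for_each_entry() and the protection queue so fastmap code can walk all known PEBs. A short sketch of how a caller might use one of them, assuming the usual WL locking is held:

/* Illustrative only: counts free PEBs with the new iteration helper. */
static int count_free_pebs(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;
	struct rb_node *tmp_rb;
	int count = 0;

	ubi_for_each_free_peb(ubi, e, tmp_rb)
		count++;

	return count;
}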
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index c52c9ce..e0caf8e 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -127,6 +127,10 @@
 	ubi_assert(!vol->updating && !vol->changing_leb);
 	vol->updating = 1;
 
+	vol->upd_buf = vmalloc(ubi->leb_size);
+	if (!vol->upd_buf)
+		return -ENOMEM;
+
 	err = set_update_marker(ubi, vol);
 	if (err)
 		return err;
@@ -146,14 +150,12 @@
 		err = clear_update_marker(ubi, vol, 0);
 		if (err)
 			return err;
+
+		vfree(vol->upd_buf);
 		vol->updating = 0;
 		return 0;
 	}
 
-	vol->upd_buf = vmalloc(ubi->leb_size);
-	if (!vol->upd_buf)
-		return -ENOMEM;
-
 	vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
 			       vol->usable_leb_size);
 	vol->upd_bytes = bytes;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index f4392f5..f8ab08f 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -114,6 +114,19 @@
 	ubi_put_device(ubi);
 	return ret;
 }
+
+static struct attribute *volume_dev_attrs[] = {
+	&attr_vol_reserved_ebs.attr,
+	&attr_vol_type.attr,
+	&attr_vol_name.attr,
+	&attr_vol_corrupted.attr,
+	&attr_vol_alignment.attr,
+	&attr_vol_usable_eb_size.attr,
+	&attr_vol_data_bytes.attr,
+	&attr_vol_upd_marker.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(volume_dev);
 #endif
 
 /* Release method for volume devices */
@@ -125,66 +138,6 @@
 	kfree(vol);
 }
 
-#ifndef __UBOOT__
-/**
- * volume_sysfs_init - initialize sysfs for new volume.
- * @ubi: UBI device description object
- * @vol: volume description object
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, this function does not free allocated resources in case of failure -
- * the caller does it. This is because this would cause release() here and the
- * caller would oops.
- */
-static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
-{
-	int err;
-
-	err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
-	if (err)
-		return err;
-	err = device_create_file(&vol->dev, &attr_vol_type);
-	if (err)
-		return err;
-	err = device_create_file(&vol->dev, &attr_vol_name);
-	if (err)
-		return err;
-	err = device_create_file(&vol->dev, &attr_vol_corrupted);
-	if (err)
-		return err;
-	err = device_create_file(&vol->dev, &attr_vol_alignment);
-	if (err)
-		return err;
-	err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
-	if (err)
-		return err;
-	err = device_create_file(&vol->dev, &attr_vol_data_bytes);
-	if (err)
-		return err;
-	err = device_create_file(&vol->dev, &attr_vol_upd_marker);
-	return err;
-}
-
-/**
- * volume_sysfs_close - close sysfs for a volume.
- * @vol: volume description object
- */
-static void volume_sysfs_close(struct ubi_volume *vol)
-{
-	device_remove_file(&vol->dev, &attr_vol_upd_marker);
-	device_remove_file(&vol->dev, &attr_vol_data_bytes);
-	device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
-	device_remove_file(&vol->dev, &attr_vol_alignment);
-	device_remove_file(&vol->dev, &attr_vol_corrupted);
-	device_remove_file(&vol->dev, &attr_vol_name);
-	device_remove_file(&vol->dev, &attr_vol_type);
-	device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
-	device_unregister(&vol->dev);
-}
-#endif
-
 /**
  * ubi_create_volume - create volume.
  * @ubi: UBI device description object
@@ -221,7 +174,7 @@
 			}
 
 		if (vol_id == UBI_VOL_NUM_AUTO) {
-			ubi_err("out of volume IDs");
+			ubi_err(ubi, "out of volume IDs");
 			err = -ENFILE;
 			goto out_unlock;
 		}
@@ -235,7 +188,7 @@
 	/* Ensure that this volume does not exist */
 	err = -EEXIST;
 	if (ubi->volumes[vol_id]) {
-		ubi_err("volume %d already exists", vol_id);
+		ubi_err(ubi, "volume %d already exists", vol_id);
 		goto out_unlock;
 	}
 
@@ -244,20 +197,22 @@
 		if (ubi->volumes[i] &&
 		    ubi->volumes[i]->name_len == req->name_len &&
 		    !strcmp(ubi->volumes[i]->name, req->name)) {
-			ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
+			ubi_err(ubi, "volume \"%s\" exists (ID %d)",
+				req->name, i);
 			goto out_unlock;
 		}
 
 	/* Calculate how many eraseblocks are requested */
 	vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
-	vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
-				      vol->usable_leb_size);
+	vol->reserved_pebs = div_u64(req->bytes + vol->usable_leb_size - 1,
+				     vol->usable_leb_size);
 
 	/* Reserve physical eraseblocks */
 	if (vol->reserved_pebs > ubi->avail_pebs) {
-		ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
+		ubi_err(ubi, "not enough PEBs, only %d available",
+			ubi->avail_pebs);
 		if (ubi->corr_peb_count)
-			ubi_err("%d PEBs are corrupted and not used",
+			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
 		err = -ENOSPC;
 		goto out_unlock;
@@ -312,26 +267,25 @@
 	dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
 	err = cdev_add(&vol->cdev, dev, 1);
 	if (err) {
-		ubi_err("cannot add character device");
+		ubi_err(ubi, "cannot add character device");
 		goto out_mapping;
 	}
 
 	vol->dev.release = vol_release;
 	vol->dev.parent = &ubi->dev;
 	vol->dev.devt = dev;
-	vol->dev.class = ubi_class;
+#ifndef __UBOOT__
+	vol->dev.class = &ubi_class;
+	vol->dev.groups = volume_dev_groups;
+#endif
 
 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
 	err = device_register(&vol->dev);
 	if (err) {
-		ubi_err("cannot register device");
+		ubi_err(ubi, "cannot register device");
 		goto out_cdev;
 	}
 
-	err = volume_sysfs_init(ubi, vol);
-	if (err)
-		goto out_sysfs;
-
 	/* Fill volume table record */
 	memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
 	vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
@@ -368,7 +322,7 @@
 	 */
 	do_free = 0;
 	get_device(&vol->dev);
-	volume_sysfs_close(vol);
+	device_unregister(&vol->dev);
 out_cdev:
 	cdev_del(&vol->cdev);
 out_mapping:
@@ -384,7 +338,7 @@
 		kfree(vol);
 	else
 		put_device(&vol->dev);
-	ubi_err("cannot create volume %d, error %d", vol_id, err);
+	ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
 	return err;
 }
 
@@ -436,7 +390,7 @@
 	}
 
 	cdev_del(&vol->cdev);
-	volume_sysfs_close(vol);
+	device_unregister(&vol->dev);
 
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= reserved_pebs;
@@ -452,7 +406,7 @@
 	return err;
 
 out_err:
-	ubi_err("cannot remove volume %d, error %d", vol_id, err);
+	ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
 	spin_lock(&ubi->volumes_lock);
 	ubi->volumes[vol_id] = vol;
 out_unlock:
@@ -485,7 +439,7 @@
 
 	if (vol->vol_type == UBI_STATIC_VOLUME &&
 	    reserved_pebs < vol->used_ebs) {
-		ubi_err("too small size %d, %d LEBs contain data",
+		ubi_err(ubi, "too small size %d, %d LEBs contain data",
 			reserved_pebs, vol->used_ebs);
 		return -EINVAL;
 	}
@@ -514,10 +468,10 @@
 	if (pebs > 0) {
 		spin_lock(&ubi->volumes_lock);
 		if (pebs > ubi->avail_pebs) {
-			ubi_err("not enough PEBs: requested %d, available %d",
+			ubi_err(ubi, "not enough PEBs: requested %d, available %d",
 				pebs, ubi->avail_pebs);
 			if (ubi->corr_peb_count)
-				ubi_err("%d PEBs are corrupted and not used",
+				ubi_err(ubi, "%d PEBs are corrupted and not used",
 					ubi->corr_peb_count);
 			spin_unlock(&ubi->volumes_lock);
 			err = -ENOSPC;
@@ -641,7 +595,7 @@
 	dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
 	err = cdev_add(&vol->cdev, dev, 1);
 	if (err) {
-		ubi_err("cannot add character device for volume %d, error %d",
+		ubi_err(ubi, "cannot add character device for volume %d, error %d",
 			vol_id, err);
 		return err;
 	}
@@ -649,19 +603,15 @@
 	vol->dev.release = vol_release;
 	vol->dev.parent = &ubi->dev;
 	vol->dev.devt = dev;
-	vol->dev.class = ubi_class;
+#ifndef __UBOOT__
+	vol->dev.class = &ubi_class;
+	vol->dev.groups = volume_dev_groups;
+#endif
 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
 	err = device_register(&vol->dev);
 	if (err)
 		goto out_cdev;
 
-	err = volume_sysfs_init(ubi, vol);
-	if (err) {
-		cdev_del(&vol->cdev);
-		volume_sysfs_close(vol);
-		return err;
-	}
-
 	self_check_volumes(ubi);
 	return err;
 
@@ -684,7 +634,7 @@
 
 	ubi->volumes[vol->vol_id] = NULL;
 	cdev_del(&vol->cdev);
-	volume_sysfs_close(vol);
+	device_unregister(&vol->dev);
 }
 
 /**
@@ -708,7 +658,7 @@
 
 	if (!vol) {
 		if (reserved_pebs) {
-			ubi_err("no volume info, but volume exists");
+			ubi_err(ubi, "no volume info, but volume exists");
 			goto fail;
 		}
 		spin_unlock(&ubi->volumes_lock);
@@ -717,90 +667,91 @@
 
 	if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
 	    vol->name_len < 0) {
-		ubi_err("negative values");
+		ubi_err(ubi, "negative values");
 		goto fail;
 	}
 	if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
-		ubi_err("bad alignment");
+		ubi_err(ubi, "bad alignment");
 		goto fail;
 	}
 
 	n = vol->alignment & (ubi->min_io_size - 1);
 	if (vol->alignment != 1 && n) {
-		ubi_err("alignment is not multiple of min I/O unit");
+		ubi_err(ubi, "alignment is not multiple of min I/O unit");
 		goto fail;
 	}
 
 	n = ubi->leb_size % vol->alignment;
 	if (vol->data_pad != n) {
-		ubi_err("bad data_pad, has to be %lld", n);
+		ubi_err(ubi, "bad data_pad, has to be %lld", n);
 		goto fail;
 	}
 
 	if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
 	    vol->vol_type != UBI_STATIC_VOLUME) {
-		ubi_err("bad vol_type");
+		ubi_err(ubi, "bad vol_type");
 		goto fail;
 	}
 
 	if (vol->upd_marker && vol->corrupted) {
-		ubi_err("update marker and corrupted simultaneously");
+		ubi_err(ubi, "update marker and corrupted simultaneously");
 		goto fail;
 	}
 
 	if (vol->reserved_pebs > ubi->good_peb_count) {
-		ubi_err("too large reserved_pebs");
+		ubi_err(ubi, "too large reserved_pebs");
 		goto fail;
 	}
 
 	n = ubi->leb_size - vol->data_pad;
 	if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
-		ubi_err("bad usable_leb_size, has to be %lld", n);
+		ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
 		goto fail;
 	}
 
 	if (vol->name_len > UBI_VOL_NAME_MAX) {
-		ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX);
+		ubi_err(ubi, "too long volume name, max is %d",
+			UBI_VOL_NAME_MAX);
 		goto fail;
 	}
 
 	n = strnlen(vol->name, vol->name_len + 1);
 	if (n != vol->name_len) {
-		ubi_err("bad name_len %lld", n);
+		ubi_err(ubi, "bad name_len %lld", n);
 		goto fail;
 	}
 
 	n = (long long)vol->used_ebs * vol->usable_leb_size;
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 		if (vol->corrupted) {
-			ubi_err("corrupted dynamic volume");
+			ubi_err(ubi, "corrupted dynamic volume");
 			goto fail;
 		}
 		if (vol->used_ebs != vol->reserved_pebs) {
-			ubi_err("bad used_ebs");
+			ubi_err(ubi, "bad used_ebs");
 			goto fail;
 		}
 		if (vol->last_eb_bytes != vol->usable_leb_size) {
-			ubi_err("bad last_eb_bytes");
+			ubi_err(ubi, "bad last_eb_bytes");
 			goto fail;
 		}
 		if (vol->used_bytes != n) {
-			ubi_err("bad used_bytes");
+			ubi_err(ubi, "bad used_bytes");
 			goto fail;
 		}
 	} else {
 		if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
-			ubi_err("bad used_ebs");
+			ubi_err(ubi, "bad used_ebs");
 			goto fail;
 		}
 		if (vol->last_eb_bytes < 0 ||
 		    vol->last_eb_bytes > vol->usable_leb_size) {
-			ubi_err("bad last_eb_bytes");
+			ubi_err(ubi, "bad last_eb_bytes");
 			goto fail;
 		}
 		if (vol->used_bytes < 0 || vol->used_bytes > n ||
 		    vol->used_bytes < n - vol->usable_leb_size) {
-			ubi_err("bad used_bytes");
+			ubi_err(ubi, "bad used_bytes");
 			goto fail;
 		}
 	}
@@ -818,7 +769,7 @@
 	if (alignment != vol->alignment || data_pad != vol->data_pad ||
 	    upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
 	    name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
-		ubi_err("volume info is different");
+		ubi_err(ubi, "volume info is different");
 		goto fail;
 	}
 
@@ -826,7 +777,7 @@
 	return 0;
 
 fail:
-	ubi_err("self-check failed for volume %d", vol_id);
+	ubi_err(ubi, "self-check failed for volume %d", vol_id);
 	if (vol)
 		ubi_dump_vol_info(vol);
 	ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
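
vmt.c drops the hand-rolled volume_sysfs_init()/volume_sysfs_close() pair in favor of the driver-core attribute-group mechanism: the attributes are listed once in volume_dev_attrs[], ATTRIBUTE_GROUPS(volume_dev) generates volume_dev_groups, and assigning that to vol->dev.groups before device_register() lets the core create and remove the sysfs files itself, so device_unregister() is enough on the error and removal paths. The general idiom looks like this, with placeholder names rather than the real UBI attributes:

/*
 * Generic driver-core attribute-group idiom, illustrative names only.
 * Listing attributes in a *_attrs[] array and assigning dev->groups
 * before device_register() replaces per-file device_create_file() calls.
 */
#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR_RO(demo);

static struct attribute *demo_dev_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo_dev);	/* emits demo_dev_groups */

/* later, before device_register(&some_dev): some_dev.groups = demo_dev_groups; */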
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index ae8ea38..993716f 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -18,9 +18,12 @@
  * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
  * other. This redundancy guarantees robustness to unclean reboots. The volume
  * table is basically an array of volume table records. Each record contains
- * full information about the volume and protected by a CRC checksum.
+ * full information about the volume and is protected by a CRC checksum. Note,
+ * nowadays we use the atomic LEB change operation when updating the volume
+ * table, so we do not really need 2 LEBs anymore, but we preserve the older
+ * design for backward compatibility reasons.
  *
- * The volume table is changed, it is first changed in RAM. Then LEB 0 is
+ * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
  * erased, and the updated volume table is written back to LEB 0. Then same for
  * LEB 1. This scheme guarantees recoverability from unclean reboots.
  *
@@ -61,6 +64,26 @@
 static struct ubi_vtbl_record empty_vtbl_record;
 
 /**
+ * ubi_update_layout_vol - helper for updating layout volumes on flash
+ * @ubi: UBI device description object
+ */
+static int ubi_update_layout_vol(struct ubi_device *ubi)
+{
+	struct ubi_volume *layout_vol;
+	int i, err;
+
+	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+		err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+						ubi->vtbl_size);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/**
  * ubi_change_vtbl_record - change volume table record.
  * @ubi: UBI device description object
  * @idx: table index to change
@@ -74,12 +97,10 @@
 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
 			   struct ubi_vtbl_record *vtbl_rec)
 {
-	int i, err;
+	int err;
 	uint32_t crc;
-	struct ubi_volume *layout_vol;
 
 	ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
-	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
 
 	if (!vtbl_rec)
 		vtbl_rec = &empty_vtbl_record;
@@ -89,19 +110,10 @@
 	}
 
 	memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
-	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
-		err = ubi_eba_unmap_leb(ubi, layout_vol, i);
-		if (err)
-			return err;
-
-		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
-					ubi->vtbl_size);
-		if (err)
-			return err;
-	}
+	err = ubi_update_layout_vol(ubi);
 
 	self_vtbl_check(ubi);
-	return 0;
+	return err ? err : 0;
 }
 
 /**
@@ -116,9 +128,7 @@
 int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
 			    struct list_head *rename_list)
 {
-	int i, err;
 	struct ubi_rename_entry *re;
-	struct ubi_volume *layout_vol;
 
 	list_for_each_entry(re, rename_list, list) {
 		uint32_t crc;
@@ -140,19 +150,7 @@
 		vtbl_rec->crc = cpu_to_be32(crc);
 	}
 
-	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
-	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
-		err = ubi_eba_unmap_leb(ubi, layout_vol, i);
-		if (err)
-			return err;
-
-		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
-					ubi->vtbl_size);
-		if (err)
-			return err;
-	}
-
-	return 0;
+	return ubi_update_layout_vol(ubi);
 }
 
 /**
@@ -184,7 +182,7 @@
 
 		crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
 		if (be32_to_cpu(vtbl[i].crc) != crc) {
-			ubi_err("bad CRC at record %u: %#08x, not %#08x",
+			ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
 				 i, crc, be32_to_cpu(vtbl[i].crc));
 			ubi_dump_vtbl_record(&vtbl[i], i);
 			return 1;
@@ -218,7 +216,7 @@
 
 		n = ubi->leb_size % alignment;
 		if (data_pad != n) {
-			ubi_err("bad data_pad, has to be %d", n);
+			ubi_err(ubi, "bad data_pad, has to be %d", n);
 			err = 6;
 			goto bad;
 		}
@@ -234,7 +232,7 @@
 		}
 
 		if (reserved_pebs > ubi->good_peb_count) {
-			ubi_err("too large reserved_pebs %d, good PEBs %d",
+			ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
 				reserved_pebs, ubi->good_peb_count);
 			err = 9;
 			goto bad;
@@ -268,7 +266,7 @@
 #else
 			    !strncmp((char *)vtbl[i].name, vtbl[n].name, len1)) {
 #endif
-				ubi_err("volumes %d and %d have the same name \"%s\"",
+				ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
 					i, n, vtbl[i].name);
 				ubi_dump_vtbl_record(&vtbl[i], i);
 				ubi_dump_vtbl_record(&vtbl[n], n);
@@ -280,7 +278,7 @@
 	return 0;
 
 bad:
-	ubi_err("volume table check failed: record %d, error %d", i, err);
+	ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
 	ubi_dump_vtbl_record(&vtbl[i], i);
 	return -EINVAL;
 }
@@ -444,11 +442,11 @@
 			leb_corrupted[1] = memcmp(leb[0], leb[1],
 						  ubi->vtbl_size);
 		if (leb_corrupted[1]) {
-			ubi_warn("volume table copy #2 is corrupted");
+			ubi_warn(ubi, "volume table copy #2 is corrupted");
 			err = create_vtbl(ubi, ai, 1, leb[0]);
 			if (err)
 				goto out_free;
-			ubi_msg("volume table was restored");
+			ubi_msg(ubi, "volume table was restored");
 		}
 
 		/* Both LEB 1 and LEB 2 are OK and consistent */
@@ -463,15 +461,15 @@
 		}
 		if (leb_corrupted[1]) {
 			/* Both LEB 0 and LEB 1 are corrupted */
-			ubi_err("both volume tables are corrupted");
+			ubi_err(ubi, "both volume tables are corrupted");
 			goto out_free;
 		}
 
-		ubi_warn("volume table copy #1 is corrupted");
+		ubi_warn(ubi, "volume table copy #1 is corrupted");
 		err = create_vtbl(ubi, ai, 0, leb[1]);
 		if (err)
 			goto out_free;
-		ubi_msg("volume table was restored");
+		ubi_msg(ubi, "volume table was restored");
 
 		vfree(leb[0]);
 		return leb[1];
@@ -560,7 +558,7 @@
 		if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
 			/* Auto re-size flag may be set only for one volume */
 			if (ubi->autoresize_vol_id != -1) {
-				ubi_err("more than one auto-resize volume (%d and %d)",
+				ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
 					ubi->autoresize_vol_id, i);
 				kfree(vol);
 				return -EINVAL;
@@ -589,7 +587,7 @@
 
 		/* Static volumes only */
 		av = ubi_find_av(ai, i);
-		if (!av) {
+		if (!av || !av->leb_count) {
 			/*
 			 * No eraseblocks belonging to this volume found. We
 			 * don't actually know whether this static volume is
@@ -606,7 +604,7 @@
 			 * We found a static volume which misses several
 			 * eraseblocks. Treat it as corrupted.
 			 */
-			ubi_warn("static volume %d misses %d LEBs - corrupted",
+			ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
 				 av->vol_id, av->used_ebs - av->leb_count);
 			vol->corrupted = 1;
 			continue;
@@ -644,10 +642,10 @@
 	vol->ubi = ubi;
 
 	if (reserved_pebs > ubi->avail_pebs) {
-		ubi_err("not enough PEBs, required %d, available %d",
+		ubi_err(ubi, "not enough PEBs, required %d, available %d",
 			reserved_pebs, ubi->avail_pebs);
 		if (ubi->corr_peb_count)
-			ubi_err("%d PEBs are corrupted and not used",
+			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
 	}
 	ubi->rsvd_pebs += reserved_pebs;
@@ -692,7 +690,7 @@
 	return 0;
 
 bad:
-	ubi_err("bad attaching information, error %d", err);
+	ubi_err(vol->ubi, "bad attaching information, error %d", err);
 	ubi_dump_av(av);
 	ubi_dump_vol_info(vol);
 	return -EINVAL;
@@ -716,14 +714,15 @@
 	struct ubi_volume *vol;
 
 	if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
-		ubi_err("found %d volumes while attaching, maximum is %d + %d",
+		ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
 			ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
 		return -EINVAL;
 	}
 
 	if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
 	    ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
-		ubi_err("too large volume ID %d found", ai->highest_vol_id);
+		ubi_err(ubi, "too large volume ID %d found",
+			ai->highest_vol_id);
 		return -EINVAL;
 	}
 
@@ -751,7 +750,7 @@
 			 * reboot while the volume was being removed. Discard
 			 * these eraseblocks.
 			 */
-			ubi_msg("finish volume %d removal", av->vol_id);
+			ubi_msg(ubi, "finish volume %d removal", av->vol_id);
 			ubi_remove_av(ai, av);
 		} else if (av) {
 			err = check_av(vol, av);
@@ -805,13 +804,13 @@
 			if (IS_ERR(ubi->vtbl))
 				return PTR_ERR(ubi->vtbl);
 		} else {
-			ubi_err("the layout volume was not found");
+			ubi_err(ubi, "the layout volume was not found");
 			return -EINVAL;
 		}
 	} else {
 		if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
 			/* This must not happen with proper UBI images */
-			ubi_err("too many LEBs (%d) in layout volume",
+			ubi_err(ubi, "too many LEBs (%d) in layout volume",
 				av->leb_count);
 			return -EINVAL;
 		}
@@ -860,7 +859,7 @@
 		return;
 
 	if (vtbl_check(ubi, ubi->vtbl)) {
-		ubi_err("self-check failed");
+		ubi_err(ubi, "self-check failed");
 		BUG();
 	}
 }
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6886f89..507b091 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -96,6 +96,7 @@
 #endif
 
 #include "ubi.h"
+#include "wl.h"
 
 /* Number of physical eraseblocks reserved for wear-leveling purposes */
 #define WL_RESERVED_PEBS 1
@@ -133,44 +134,6 @@
 static int self_check_in_pq(const struct ubi_device *ubi,
 			    struct ubi_wl_entry *e);
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-#ifndef __UBOOT__
-/**
- * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
- * @wrk: the work description object
- */
-static void update_fastmap_work_fn(struct work_struct *wrk)
-{
-	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
-	ubi_update_fastmap(ubi);
-}
-#endif
-
-/**
- *  ubi_ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
- *  @ubi: UBI device description object
- *  @pnum: the to be checked PEB
- */
-static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
-{
-	int i;
-
-	if (!ubi->fm)
-		return 0;
-
-	for (i = 0; i < ubi->fm->used_blocks; i++)
-		if (ubi->fm->e[i]->pnum == pnum)
-			return 1;
-
-	return 0;
-}
-#else
-static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
-{
-	return 0;
-}
-#endif
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -208,13 +171,31 @@
 }
 
 /**
+ * wl_entry_destroy - destroy a wear-leveling entry.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to destroy
+ *
+ * This function destroys a wear leveling entry and removes
+ * the reference from the lookup table.
+ */
+static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+	ubi->lookuptbl[e->pnum] = NULL;
+	kmem_cache_free(ubi_wl_entry_slab, e);
+}
+
+/**
  * do_work - do one pending work.
  * @ubi: UBI device description object
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
+#ifndef __UBOOT__
 static int do_work(struct ubi_device *ubi)
+#else
+int do_work(struct ubi_device *ubi)
+#endif
 {
 	int err;
 	struct ubi_work *wrk;
@@ -248,40 +229,13 @@
 	 */
 	err = wrk->func(ubi, wrk, 0);
 	if (err)
-		ubi_err("work failed with error code %d", err);
+		ubi_err(ubi, "work failed with error code %d", err);
 	up_read(&ubi->work_sem);
 
 	return err;
 }
 
 /**
- * produce_free_peb - produce a free physical eraseblock.
- * @ubi: UBI device description object
- *
- * This function tries to make a free PEB by means of synchronous execution of
- * pending works. This may be needed if, for example the background thread is
- * disabled. Returns zero in case of success and a negative error code in case
- * of failure.
- */
-static int produce_free_peb(struct ubi_device *ubi)
-{
-	int err;
-
-	while (!ubi->free.rb_node) {
-		spin_unlock(&ubi->wl_lock);
-
-		dbg_wl("do one work synchronously");
-		err = do_work(ubi);
-
-		spin_lock(&ubi->wl_lock);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-/**
  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
  * @e: the wear-leveling entry to check
  * @root: the root of the tree
@@ -404,119 +358,32 @@
 	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
 		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
 		/* If no fastmap has been written and this WL entry can be used
 		 * as anchor PEB, hold it back and return the second best
 		 * WL entry such that fastmap can use the anchor PEB later. */
-		if (e && !ubi->fm_disabled && !ubi->fm &&
-		    e->pnum < UBI_FM_MAX_START)
-			e = rb_entry(rb_next(root->rb_node),
-				     struct ubi_wl_entry, u.rb);
-#endif
+		e = may_reserve_for_fm(ubi, e, root);
 	} else
 		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
 
 	return e;
 }
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
 /**
- * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
- * @root: the RB-tree where to look for
- */
-static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
-{
-	struct rb_node *p;
-	struct ubi_wl_entry *e, *victim = NULL;
-	int max_ec = UBI_MAX_ERASECOUNTER;
-
-	ubi_rb_for_each_entry(p, e, root, u.rb) {
-		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
-			victim = e;
-			max_ec = e->ec;
-		}
-	}
-
-	return victim;
-}
-
-static int anchor_pebs_avalible(struct rb_root *root)
-{
-	struct rb_node *p;
-	struct ubi_wl_entry *e;
-
-	ubi_rb_for_each_entry(p, e, root, u.rb)
-		if (e->pnum < UBI_FM_MAX_START)
-			return 1;
-
-	return 0;
-}
-
-/**
- * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
- * @ubi: UBI device description object
- * @anchor: This PEB will be used as anchor PEB by fastmap
- *
- * The function returns a physical erase block with a given maximal number
- * and removes it from the wl subsystem.
- * Must be called with wl_lock held!
- */
-struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
-{
-	struct ubi_wl_entry *e = NULL;
-
-	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
-		goto out;
-
-	if (anchor)
-		e = find_anchor_wl_entry(&ubi->free);
-	else
-		e = find_mean_wl_entry(ubi, &ubi->free);
-
-	if (!e)
-		goto out;
-
-	self_check_in_wl_tree(ubi, e, &ubi->free);
-
-	/* remove it from the free list,
-	 * the wl subsystem does no longer know this erase block */
-	rb_erase(&e->u.rb, &ubi->free);
-	ubi->free_count--;
-out:
-	return e;
-}
-#endif
-
-/**
- * __wl_get_peb - get a physical eraseblock.
+ * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
+ * refill_wl_user_pool().
  * @ubi: UBI device description object
  *
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure.
+ * This function returns a wear leveling entry in case of success and
+ * NULL in case of failure.
  */
-static int __wl_get_peb(struct ubi_device *ubi)
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
 {
-	int err;
 	struct ubi_wl_entry *e;
 
-retry:
-	if (!ubi->free.rb_node) {
-		if (ubi->works_count == 0) {
-			ubi_err("no free eraseblocks");
-			ubi_assert(list_empty(&ubi->works));
-			return -ENOSPC;
-		}
-
-		err = produce_free_peb(ubi);
-		if (err < 0)
-			return err;
-		goto retry;
-	}
-
 	e = find_mean_wl_entry(ubi, &ubi->free);
 	if (!e) {
-		ubi_err("no free eraseblocks");
-		return -ENOSPC;
+		ubi_err(ubi, "no free eraseblocks");
+		return NULL;
 	}
 
 	self_check_in_wl_tree(ubi, e, &ubi->free);
@@ -528,178 +395,10 @@
 	rb_erase(&e->u.rb, &ubi->free);
 	ubi->free_count--;
 	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
-#ifndef CONFIG_MTD_UBI_FASTMAP
-	/* We have to enqueue e only if fastmap is disabled,
-	 * is fastmap enabled prot_queue_add() will be called by
-	 * ubi_wl_get_peb() after removing e from the pool. */
-	prot_queue_add(ubi, e);
-#endif
-	return e->pnum;
-}
-
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * return_unused_pool_pebs - returns unused PEB to the free tree.
- * @ubi: UBI device description object
- * @pool: fastmap pool description object
- */
-static void return_unused_pool_pebs(struct ubi_device *ubi,
-				    struct ubi_fm_pool *pool)
-{
-	int i;
-	struct ubi_wl_entry *e;
-
-	for (i = pool->used; i < pool->size; i++) {
-		e = ubi->lookuptbl[pool->pebs[i]];
-		wl_tree_add(e, &ubi->free);
-		ubi->free_count++;
-	}
-}
-
-/**
- * refill_wl_pool - refills all the fastmap pool used by the
- * WL sub-system.
- * @ubi: UBI device description object
- */
-static void refill_wl_pool(struct ubi_device *ubi)
-{
-	struct ubi_wl_entry *e;
-	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
-
-	return_unused_pool_pebs(ubi, pool);
-
-	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
-		if (!ubi->free.rb_node ||
-		   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
-			break;
-
-		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
-		self_check_in_wl_tree(ubi, e, &ubi->free);
-		rb_erase(&e->u.rb, &ubi->free);
-		ubi->free_count--;
-
-		pool->pebs[pool->size] = e->pnum;
-	}
-	pool->used = 0;
-}
-
-/**
- * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
- * @ubi: UBI device description object
- */
-static void refill_wl_user_pool(struct ubi_device *ubi)
-{
-	struct ubi_fm_pool *pool = &ubi->fm_pool;
-
-	return_unused_pool_pebs(ubi, pool);
-
-	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
-		pool->pebs[pool->size] = __wl_get_peb(ubi);
-		if (pool->pebs[pool->size] < 0)
-			break;
-	}
-	pool->used = 0;
-}
-
-/**
- * ubi_refill_pools - refills all fastmap PEB pools.
- * @ubi: UBI device description object
- */
-void ubi_refill_pools(struct ubi_device *ubi)
-{
-	spin_lock(&ubi->wl_lock);
-	refill_wl_pool(ubi);
-	refill_wl_user_pool(ubi);
-	spin_unlock(&ubi->wl_lock);
-}
-
-/* ubi_wl_get_peb - works exaclty like __wl_get_peb but keeps track of
- * the fastmap pool.
- */
-int ubi_wl_get_peb(struct ubi_device *ubi)
-{
-	int ret;
-	struct ubi_fm_pool *pool = &ubi->fm_pool;
-	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
-
-	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
-	    wl_pool->used == wl_pool->size)
-		ubi_update_fastmap(ubi);
-
-	/* we got not a single free PEB */
-	if (!pool->size)
-		ret = -ENOSPC;
-	else {
-		spin_lock(&ubi->wl_lock);
-		ret = pool->pebs[pool->used++];
-		prot_queue_add(ubi, ubi->lookuptbl[ret]);
-		spin_unlock(&ubi->wl_lock);
-	}
-
-	return ret;
-}
-
-/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
- *
- * @ubi: UBI device description object
- */
-static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
-{
-	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
-	int pnum;
-
-	if (pool->used == pool->size || !pool->size) {
-		/* We cannot update the fastmap here because this
-		 * function is called in atomic context.
-		 * Let's fail here and refill/update it as soon as possible. */
-#ifndef __UBOOT__
-		schedule_work(&ubi->fm_work);
-#else
-		/* In U-Boot we must call this directly */
-	        ubi_update_fastmap(ubi);
-#endif
-		return NULL;
-	} else {
-		pnum = pool->pebs[pool->used++];
-		return ubi->lookuptbl[pnum];
-	}
-}
-#else
-static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
-{
-	struct ubi_wl_entry *e;
-
-	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
-	self_check_in_wl_tree(ubi, e, &ubi->free);
-	ubi->free_count--;
-	ubi_assert(ubi->free_count >= 0);
-	rb_erase(&e->u.rb, &ubi->free);
 
 	return e;
 }
 
-int ubi_wl_get_peb(struct ubi_device *ubi)
-{
-	int peb, err;
-
-	spin_lock(&ubi->wl_lock);
-	peb = __wl_get_peb(ubi);
-	spin_unlock(&ubi->wl_lock);
-
-	if (peb < 0)
-		return peb;
-
-	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
-				    ubi->peb_size - ubi->vid_hdr_aloffset);
-	if (err) {
-		ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
-		return err;
-	}
-
-	return peb;
-}
-#endif
-
 /**
  * prot_queue_del - remove a physical eraseblock from the protection queue.
  * @ubi: UBI device description object
@@ -760,7 +459,7 @@
 		 * Erase counter overflow. Upgrade UBI and use 64-bit
 		 * erase counters internally.
 		 */
-		ubi_err("erase counter overflow at PEB %d, EC %llu",
+		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
 			e->pnum, ec);
 		err = -EINVAL;
 		goto out_free;
@@ -835,7 +534,7 @@
  * @wrk: the work to schedule
  *
  * This function adds a work defined by @wrk to the tail of the pending works
- * list. Can only be used of ubi->work_sem is already held in read mode!
+ * list. Can only be used if ubi->work_sem is already held in read mode!
  */
 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
 {
@@ -847,11 +546,16 @@
 	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
 		wake_up_process(ubi->bgt_thread);
 #else
+	int err;
 	/*
 	 * U-Boot special: We have no bgt_thread in U-Boot!
 	 * So just call do_work() here directly.
 	 */
-	do_work(ubi);
+	err = do_work(ubi);
+	if (err) {
+		ubi_err(ubi, "%s: work failed with error code %d",
+			ubi->bgt_name, err);
+	}
 #endif
 	spin_unlock(&ubi->wl_lock);
 }
@@ -872,18 +576,7 @@
 }
 
 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
-			int cancel);
-
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_is_erase_work - checks whether a work is erase work.
- * @wrk: The work object to be checked
- */
-int ubi_is_erase_work(struct ubi_work *wrk)
-{
-	return wrk->func == erase_worker;
-}
-#endif
+			int shutdown);
 
 /**
  * schedule_erase - schedule an erase work.
@@ -902,7 +595,6 @@
 	struct ubi_work *wl_wrk;
 
 	ubi_assert(e);
-	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
 
 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
 	       e->pnum, e->ec, torture);
@@ -949,66 +641,22 @@
 	return erase_worker(ubi, wl_wrk, 0);
 }
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
- * sub-system.
- * see: ubi_wl_put_peb()
- *
- * @ubi: UBI device description object
- * @fm_e: physical eraseblock to return
- * @lnum: the last used logical eraseblock number for the PEB
- * @torture: if this physical eraseblock has to be tortured
- */
-int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
-		      int lnum, int torture)
-{
-	struct ubi_wl_entry *e;
-	int vol_id, pnum = fm_e->pnum;
-
-	dbg_wl("PEB %d", pnum);
-
-	ubi_assert(pnum >= 0);
-	ubi_assert(pnum < ubi->peb_count);
-
-	spin_lock(&ubi->wl_lock);
-	e = ubi->lookuptbl[pnum];
-
-	/* This can happen if we recovered from a fastmap the very
-	 * first time and writing now a new one. In this case the wl system
-	 * has never seen any PEB used by the original fastmap.
-	 */
-	if (!e) {
-		e = fm_e;
-		ubi_assert(e->ec >= 0);
-		ubi->lookuptbl[pnum] = e;
-	} else {
-		e->ec = fm_e->ec;
-		kfree(fm_e);
-	}
-
-	spin_unlock(&ubi->wl_lock);
-
-	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
-	return schedule_erase(ubi, e, vol_id, lnum, torture);
-}
-#endif
-
 /**
  * wear_leveling_worker - wear-leveling worker function.
  * @ubi: UBI device description object
  * @wrk: the work object
- * @cancel: non-zero if the worker has to free memory and exit
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL-subsystem is shutting down
  *
  * This function copies a more worn out physical eraseblock to a less worn out
  * one. Returns zero in case of success and a negative error code in case of
  * failure.
  */
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
-				int cancel)
+				int shutdown)
 {
 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
-	int vol_id = -1, uninitialized_var(lnum);
+	int vol_id = -1, lnum = -1;
 #ifdef CONFIG_MTD_UBI_FASTMAP
 	int anchor = wrk->anchor;
 #endif
@@ -1016,7 +664,7 @@
 	struct ubi_vid_hdr *vid_hdr;
 
 	kfree(wrk);
-	if (cancel)
+	if (shutdown)
 		return 0;
 
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1144,7 +792,7 @@
 			goto out_not_moved;
 		}
 
-		ubi_err("error %d while reading VID header from PEB %d",
+		ubi_err(ubi, "error %d while reading VID header from PEB %d",
 			err, e1->pnum);
 		goto out_error;
 	}
@@ -1188,7 +836,7 @@
 			 * UBI from trying to move it over and over again.
 			 */
 			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
-				ubi_err("too many erroneous eraseblocks (%d)",
+				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
 					ubi->erroneous_peb_count);
 				goto out_error;
 			}
@@ -1204,7 +852,7 @@
 
 	/* The PEB has been successfully moved */
 	if (scrubbing)
-		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
 			e1->pnum, vol_id, lnum, e2->pnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 
@@ -1219,9 +867,8 @@
 
 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
 	if (err) {
-		kmem_cache_free(ubi_wl_entry_slab, e1);
 		if (e2)
-			kmem_cache_free(ubi_wl_entry_slab, e2);
+			wl_entry_destroy(ubi, e2);
 		goto out_ro;
 	}
 
@@ -1233,10 +880,8 @@
 		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
 		       e2->pnum, vol_id, lnum);
 		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
-		if (err) {
-			kmem_cache_free(ubi_wl_entry_slab, e2);
+		if (err)
 			goto out_ro;
-		}
 	}
 
 	dbg_wl("done");
@@ -1272,19 +917,18 @@
 
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
-	if (err) {
-		kmem_cache_free(ubi_wl_entry_slab, e2);
+	if (err)
 		goto out_ro;
-	}
+
 	mutex_unlock(&ubi->move_mutex);
 	return 0;
 
 out_error:
 	if (vol_id != -1)
-		ubi_err("error %d while moving PEB %d to PEB %d",
+		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
 			err, e1->pnum, e2->pnum);
 	else
-		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
 			err, e1->pnum, vol_id, lnum, e2->pnum);
 	spin_lock(&ubi->wl_lock);
 	ubi->move_from = ubi->move_to = NULL;
@@ -1292,8 +936,8 @@
 	spin_unlock(&ubi->wl_lock);
 
 	ubi_free_vid_hdr(ubi, vid_hdr);
-	kmem_cache_free(ubi_wl_entry_slab, e1);
-	kmem_cache_free(ubi_wl_entry_slab, e2);
+	wl_entry_destroy(ubi, e1);
+	wl_entry_destroy(ubi, e2);
 
 out_ro:
 	ubi_ro_mode(ubi);
@@ -1379,43 +1023,12 @@
 	return err;
 }
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
- * @ubi: UBI device description object
- */
-int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
-{
-	struct ubi_work *wrk;
-
-	spin_lock(&ubi->wl_lock);
-	if (ubi->wl_scheduled) {
-		spin_unlock(&ubi->wl_lock);
-		return 0;
-	}
-	ubi->wl_scheduled = 1;
-	spin_unlock(&ubi->wl_lock);
-
-	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
-	if (!wrk) {
-		spin_lock(&ubi->wl_lock);
-		ubi->wl_scheduled = 0;
-		spin_unlock(&ubi->wl_lock);
-		return -ENOMEM;
-	}
-
-	wrk->anchor = 1;
-	wrk->func = &wear_leveling_worker;
-	schedule_ubi_work(ubi, wrk);
-	return 0;
-}
-#endif
-
 /**
  * erase_worker - physical eraseblock erase worker function.
  * @ubi: UBI device description object
  * @wl_wrk: the work object
- * @cancel: non-zero if the worker has to free memory and exit
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL sub-system is shutting down
  *
  * This function erases a physical eraseblock and perform torture testing if
  * needed. It also takes care about marking the physical eraseblock bad if
@@ -1423,7 +1036,7 @@
  * failure.
  */
 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
-			int cancel)
+			int shutdown)
 {
 	struct ubi_wl_entry *e = wl_wrk->e;
 	int pnum = e->pnum;
@@ -1431,18 +1044,16 @@
 	int lnum = wl_wrk->lnum;
 	int err, available_consumed = 0;
 
-	if (cancel) {
+	if (shutdown) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
 		kfree(wl_wrk);
-		kmem_cache_free(ubi_wl_entry_slab, e);
+		wl_entry_destroy(ubi, e);
 		return 0;
 	}
 
 	dbg_wl("erase PEB %d EC %d LEB %d:%d",
 	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
 
-	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
-
 	err = sync_erase(ubi, e, wl_wrk->torture);
 	if (!err) {
 		/* Fine, we've erased it successfully */
@@ -1464,7 +1075,7 @@
 		return err;
 	}
 
-	ubi_err("failed to erase PEB %d, error %d", pnum, err);
+	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
@@ -1480,7 +1091,7 @@
 		return err;
 	}
 
-	kmem_cache_free(ubi_wl_entry_slab, e);
+	wl_entry_destroy(ubi, e);
 	if (err != -EIO)
 		/*
 		 * If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1492,7 +1103,7 @@
 	/* It is %-EIO, the PEB went bad */
 
 	if (!ubi->bad_allowed) {
-		ubi_err("bad physical eraseblock %d detected", pnum);
+		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
 		goto out_ro;
 	}
 
@@ -1500,7 +1111,7 @@
 	if (ubi->beb_rsvd_pebs == 0) {
 		if (ubi->avail_pebs == 0) {
 			spin_unlock(&ubi->volumes_lock);
-			ubi_err("no reserved/available physical eraseblocks");
+			ubi_err(ubi, "no reserved/available physical eraseblocks");
 			goto out_ro;
 		}
 		ubi->avail_pebs -= 1;
@@ -1508,7 +1119,7 @@
 	}
 	spin_unlock(&ubi->volumes_lock);
 
-	ubi_msg("mark PEB %d as bad", pnum);
+	ubi_msg(ubi, "mark PEB %d as bad", pnum);
 	err = ubi_io_mark_bad(ubi, pnum);
 	if (err)
 		goto out_ro;
@@ -1529,11 +1140,12 @@
 	ubi->good_peb_count -= 1;
 	ubi_calculate_reserved(ubi);
 	if (available_consumed)
-		ubi_warn("no PEBs in the reserved pool, used an available PEB");
+		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
 	else if (ubi->beb_rsvd_pebs)
-		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
+		ubi_msg(ubi, "%d PEBs left in the reserve",
+			ubi->beb_rsvd_pebs);
 	else
-		ubi_warn("last PEB from the reserve was used");
+		ubi_warn(ubi, "last PEB from the reserve was used");
 	spin_unlock(&ubi->volumes_lock);
 
 	return err;
@@ -1571,6 +1183,8 @@
 	ubi_assert(pnum >= 0);
 	ubi_assert(pnum < ubi->peb_count);
 
+	down_read(&ubi->fm_protect);
+
 retry:
 	spin_lock(&ubi->wl_lock);
 	e = ubi->lookuptbl[pnum];
@@ -1601,6 +1215,7 @@
 		ubi_assert(!ubi->move_to_put);
 		ubi->move_to_put = 1;
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->fm_protect);
 		return 0;
 	} else {
 		if (in_wl_tree(e, &ubi->used)) {
@@ -1619,9 +1234,10 @@
 		} else {
 			err = prot_queue_del(ubi, e->pnum);
 			if (err) {
-				ubi_err("PEB %d not found", pnum);
+				ubi_err(ubi, "PEB %d not found", pnum);
 				ubi_ro_mode(ubi);
 				spin_unlock(&ubi->wl_lock);
+				up_read(&ubi->fm_protect);
 				return err;
 			}
 		}
@@ -1635,6 +1251,7 @@
 		spin_unlock(&ubi->wl_lock);
 	}
 
+	up_read(&ubi->fm_protect);
 	return err;
 }
 
@@ -1652,7 +1269,7 @@
 {
 	struct ubi_wl_entry *e;
 
-	ubi_msg("schedule PEB %d for scrubbing", pnum);
+	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
 
 retry:
 	spin_lock(&ubi->wl_lock);
@@ -1684,7 +1301,7 @@
 
 		err = prot_queue_del(ubi, e->pnum);
 		if (err) {
-			ubi_err("PEB %d not found", pnum);
+			ubi_err(ubi, "PEB %d not found", pnum);
 			ubi_ro_mode(ubi);
 			spin_unlock(&ubi->wl_lock);
 			return err;
@@ -1726,12 +1343,12 @@
 	       vol_id, lnum, ubi->works_count);
 
 	while (found) {
-		struct ubi_work *wrk;
+		struct ubi_work *wrk, *tmp;
 		found = 0;
 
 		down_read(&ubi->work_sem);
 		spin_lock(&ubi->wl_lock);
-		list_for_each_entry(wrk, &ubi->works, list) {
+		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
 			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
 			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
 				list_del(&wrk->list);
@@ -1766,9 +1383,10 @@
 
 /**
  * tree_destroy - destroy an RB-tree.
+ * @ubi: UBI device description object
  * @root: the root of the tree to destroy
  */
-static void tree_destroy(struct rb_root *root)
+static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
 {
 	struct rb_node *rb;
 	struct ubi_wl_entry *e;
@@ -1790,7 +1408,7 @@
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(ubi_wl_entry_slab, e);
+			wl_entry_destroy(ubi, e);
 		}
 	}
 }
@@ -1804,7 +1422,7 @@
 	int failures = 0;
 	struct ubi_device *ubi = u;
 
-	ubi_msg("background thread \"%s\" started, PID %d",
+	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
 		ubi->bgt_name, task_pid_nr(current));
 
 	set_freezable();
@@ -1829,14 +1447,14 @@
 
 		err = do_work(ubi);
 		if (err) {
-			ubi_err("%s: work failed with error code %d",
+			ubi_err(ubi, "%s: work failed with error code %d",
 				ubi->bgt_name, err);
 			if (failures++ > WL_MAX_FAILURES) {
 				/*
 				 * Too many failures, disable the thread and
 				 * switch to read-only mode.
 				 */
-				ubi_msg("%s: %d consecutive failures",
+				ubi_msg(ubi, "%s: %d consecutive failures",
 					ubi->bgt_name, WL_MAX_FAILURES);
 				ubi_ro_mode(ubi);
 				ubi->thread_enabled = 0;
@@ -1853,11 +1471,18 @@
 }
 
 /**
- * cancel_pending - cancel all pending works.
+ * shutdown_work - shut down all pending works.
  * @ubi: UBI device description object
  */
-static void cancel_pending(struct ubi_device *ubi)
+static void shutdown_work(struct ubi_device *ubi)
 {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+#ifndef __UBOOT__
+	flush_work(&ubi->fm_work);
+#else
+	/* in U-Boot all work is done synchronously, nothing to flush */
+#endif
+#endif
 	while (!list_empty(&ubi->works)) {
 		struct ubi_work *wrk;
 
@@ -1891,11 +1516,6 @@
 	init_rwsem(&ubi->work_sem);
 	ubi->max_ec = ai->max_ec;
 	INIT_LIST_HEAD(&ubi->works);
-#ifndef __UBOOT__
-#ifdef CONFIG_MTD_UBI_FASTMAP
-	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
-#endif
-#endif
 
 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
 
@@ -1917,10 +1537,9 @@
 
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
-		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
-			kmem_cache_free(ubi_wl_entry_slab, e);
+			wl_entry_destroy(ubi, e);
 			goto out_free;
 		}
 
@@ -1938,7 +1557,6 @@
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
 		ubi_assert(e->ec >= 0);
-		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
 
 		wl_tree_add(e, &ubi->free);
 		ubi->free_count++;
@@ -1976,23 +1594,26 @@
 
 	dbg_wl("found %i PEBs", found_pebs);
 
-	if (ubi->fm)
-		ubi_assert(ubi->good_peb_count == \
+	if (ubi->fm) {
+		ubi_assert(ubi->good_peb_count ==
 			   found_pebs + ubi->fm->used_blocks);
+
+		for (i = 0; i < ubi->fm->used_blocks; i++) {
+			e = ubi->fm->e[i];
+			ubi->lookuptbl[e->pnum] = e;
+		}
+	}
 	else
 		ubi_assert(ubi->good_peb_count == found_pebs);
 
 	reserved_pebs = WL_RESERVED_PEBS;
-#ifdef CONFIG_MTD_UBI_FASTMAP
-	/* Reserve enough LEBs to store two fastmaps. */
-	reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
-#endif
+	ubi_fastmap_init(ubi, &reserved_pebs);
 
 	if (ubi->avail_pebs < reserved_pebs) {
-		ubi_err("no enough physical eraseblocks (%d, need %d)",
+		ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
 			ubi->avail_pebs, reserved_pebs);
 		if (ubi->corr_peb_count)
-			ubi_err("%d PEBs are corrupted and not used",
+			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
 		goto out_free;
 	}
@@ -2007,10 +1628,10 @@
 	return 0;
 
 out_free:
-	cancel_pending(ubi);
-	tree_destroy(&ubi->used);
-	tree_destroy(&ubi->free);
-	tree_destroy(&ubi->scrub);
+	shutdown_work(ubi);
+	tree_destroy(ubi, &ubi->used);
+	tree_destroy(ubi, &ubi->free);
+	tree_destroy(ubi, &ubi->scrub);
 	kfree(ubi->lookuptbl);
 	return err;
 }
@@ -2027,7 +1648,7 @@
 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
 			list_del(&e->u.list);
-			kmem_cache_free(ubi_wl_entry_slab, e);
+			wl_entry_destroy(ubi, e);
 		}
 	}
 }
@@ -2039,12 +1660,13 @@
 void ubi_wl_close(struct ubi_device *ubi)
 {
 	dbg_wl("close the WL sub-system");
-	cancel_pending(ubi);
+	ubi_fastmap_close(ubi);
+	shutdown_work(ubi);
 	protection_queue_destroy(ubi);
-	tree_destroy(&ubi->used);
-	tree_destroy(&ubi->erroneous);
-	tree_destroy(&ubi->free);
-	tree_destroy(&ubi->scrub);
+	tree_destroy(ubi, &ubi->used);
+	tree_destroy(ubi, &ubi->erroneous);
+	tree_destroy(ubi, &ubi->free);
+	tree_destroy(ubi, &ubi->scrub);
 	kfree(ubi->lookuptbl);
 }
 
@@ -2080,8 +1702,8 @@
 
 	read_ec = be64_to_cpu(ec_hdr->ec);
 	if (ec != read_ec && read_ec - ec > 1) {
-		ubi_err("self-check failed for PEB %d", pnum);
-		ubi_err("read EC is %lld, should be %d", read_ec, ec);
+		ubi_err(ubi, "self-check failed for PEB %d", pnum);
+		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
 		dump_stack();
 		err = 1;
 	} else
@@ -2110,7 +1732,7 @@
 	if (in_wl_tree(e, root))
 		return 0;
 
-	ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
+	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
 		e->pnum, e->ec, root);
 	dump_stack();
 	return -EINVAL;
@@ -2138,8 +1760,99 @@
 			if (p == e)
 				return 0;
 
-	ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
+	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
 		e->pnum, e->ec);
 	dump_stack();
 	return -EINVAL;
 }
+#ifndef CONFIG_MTD_UBI_FASTMAP
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_wl_entry *e;
+
+	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+	ubi->free_count--;
+	ubi_assert(ubi->free_count >= 0);
+	rb_erase(&e->u.rb, &ubi->free);
+
+	return e;
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example, the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+	int err;
+
+	while (!ubi->free.rb_node && ubi->works_count) {
+		spin_unlock(&ubi->wl_lock);
+
+		dbg_wl("do one work synchronously");
+		err = do_work(ubi);
+
+		spin_lock(&ubi->wl_lock);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	int err;
+	struct ubi_wl_entry *e;
+
+retry:
+	down_read(&ubi->fm_eba_sem);
+	spin_lock(&ubi->wl_lock);
+	if (!ubi->free.rb_node) {
+		if (ubi->works_count == 0) {
+			ubi_err(ubi, "no free eraseblocks");
+			ubi_assert(list_empty(&ubi->works));
+			spin_unlock(&ubi->wl_lock);
+			return -ENOSPC;
+		}
+
+		err = produce_free_peb(ubi);
+		if (err < 0) {
+			spin_unlock(&ubi->wl_lock);
+			return err;
+		}
+		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->fm_eba_sem);
+		goto retry;
+
+	}
+	e = wl_get_wle(ubi);
+	prot_queue_add(ubi, e);
+	spin_unlock(&ubi->wl_lock);
+
+	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+				    ubi->peb_size - ubi->vid_hdr_aloffset);
+	if (err) {
+		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
+		return err;
+	}
+
+	return e->pnum;
+}
+#else
+#include "fastmap-wl.c"
+#endif
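
The kmem_cache_free(ubi_wl_entry_slab, e) calls replaced throughout this file
now go through wl_entry_destroy(), a helper defined earlier in wl.c and
therefore outside the visible hunks. A minimal sketch of that helper, assuming
the v4.2 layout this sync is based on (approximate, for orientation only):

	/* sketch only -- not part of this patch's hunks */
	static void wl_entry_destroy(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
	{
		/* drop the PEB from the per-device lookup table first */
		ubi->lookuptbl[e->pnum] = NULL;
		kmem_cache_free(ubi_wl_entry_slab, e);
	}

Compared with a bare kmem_cache_free(), the helper also clears the lookuptbl
slot, which is why the error paths above no longer free the entries directly.
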
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
new file mode 100644
index 0000000..662dbe3
--- /dev/null
+++ b/drivers/mtd/ubi/wl.h
@@ -0,0 +1,34 @@
+#ifndef UBI_WL_H
+#define UBI_WL_H
+#ifdef CONFIG_MTD_UBI_FASTMAP
+static int anchor_pebs_avalible(struct rb_root *root);
+#ifndef __UBOOT__
+static void update_fastmap_work_fn(struct work_struct *wrk);
+#else
+void update_fastmap_work_fn(struct ubi_device *ubi);
+#endif
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static void ubi_fastmap_close(struct ubi_device *ubi);
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
+{
+	/* Reserve enough LEBs to store two fastmaps. */
+	*count += (ubi->fm_size / ubi->leb_size) * 2;
+#ifndef __UBOOT__
+	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+#endif
+}
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+					       struct ubi_wl_entry *e,
+					       struct rb_root *root);
+#else /* !CONFIG_MTD_UBI_FASTMAP */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+					       struct ubi_wl_entry *e,
+					       struct rb_root *root) {
+	return e;
+}
+#endif /* CONFIG_MTD_UBI_FASTMAP */
+#endif /* UBI_WL_H */
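
With fastmap enabled, ubi_fastmap_init() above takes over the reservation that
used to be open-coded in ubi_wl_init() (the removed
"reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2" lines). A small,
self-contained illustration of that arithmetic, using a purely hypothetical
geometry:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical geometry, for illustration only */
		long leb_size = 126 * 1024;    /* usable bytes per LEB */
		long fm_size  = 2 * leb_size;  /* one fastmap spans two LEBs here */
		int reserved_pebs = 0;

		/* same calculation as ubi_fastmap_init(): two on-flash copies */
		reserved_pebs += (fm_size / leb_size) * 2;

		/* prints: extra PEBs reserved for fastmap: 4 */
		printf("extra PEBs reserved for fastmap: %d\n", reserved_pebs);
		return 0;
	}

In other words, a device whose fastmap spans two LEBs sets aside four extra
PEBs on top of WL_RESERVED_PEBS, matching the "store two fastmaps" comment
carried over from the old code.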