mtd, ubi, ubifs: resync with Linux-3.14

Resync the UBI subsystem with Linux:

commit 455c6fdbd219161bd09b1165f11699d6d73de11c
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Sun Mar 30 20:40:15 2014 -0700

    Linux 3.14

A nice side effect of this is that it introduces UBI Fastmap support
to U-Boot.

Signed-off-by: Heiko Schocher <hs@denx.de>
Signed-off-by: Tom Rini <trini@ti.com>
Cc: Marek Vasut <marex@denx.de>
Cc: Sergey Lapin <slapin@ossfans.org>
Cc: Scott Wood <scottwood@freescale.com>
Cc: Joerg Krause <jkrause@posteo.de>
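
A note on enabling it: fastmap support is opt-in at build time. A
minimal sketch of a board config header turning it on (the
CONFIG_MTD_UBI_FASTMAP* symbols are assumed to mirror the Linux Kconfig
options of the same name):

	/* include/configs/<board>.h - hypothetical fastmap enablement */
	#define CONFIG_MTD_UBI_FASTMAP			/* attach via fastmap when available */
	#define CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT 1	/* write a fastmap on first attach */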
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 31e4289..39daeab 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -1,16 +1,32 @@
 /*
  * MTD device concatenation layer
  *
- * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
+ * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
  *
  * NAND support by Christian Gan <cgan@iders.ca>
  *
- * This code is GPL
+ * SPDX-License-Identifier:	GPL-2.0+
+ *
  */
 
-#include <linux/mtd/mtd.h>
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/backing-dev.h>
+#include <asm/div64.h>
+#else
+#include <div64.h>
 #include <linux/compat.h>
+#endif
+
+#include <linux/mtd/mtd.h>
 #include <linux/mtd/concat.h>
+
 #include <ubi_uboot.h>
 
 /*
@@ -51,7 +67,9 @@
 	int ret = 0, err;
 	int i;
 
+#ifdef __UBOOT__
 	*retlen = 0;
+#endif
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
@@ -105,7 +123,9 @@
 	int err = -EINVAL;
 	int i;
 
+#ifdef __UBOOT__
 	*retlen = 0;
+#endif
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
@@ -137,6 +157,83 @@
 	return err;
 }
 
+#ifndef __UBOOT__
+static int
+concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
+		unsigned long count, loff_t to, size_t * retlen)
+{
+	struct mtd_concat *concat = CONCAT(mtd);
+	struct kvec *vecs_copy;
+	unsigned long entry_low, entry_high;
+	size_t total_len = 0;
+	int i;
+	int err = -EINVAL;
+
+	/* Calculate total length of data */
+	for (i = 0; i < count; i++)
+		total_len += vecs[i].iov_len;
+
+	/* Check alignment */
+	if (mtd->writesize > 1) {
+		uint64_t __to = to;
+		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
+			return -EINVAL;
+	}
+
+	/* make a copy of vecs */
+	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
+	if (!vecs_copy)
+		return -ENOMEM;
+
+	entry_low = 0;
+	for (i = 0; i < concat->num_subdev; i++) {
+		struct mtd_info *subdev = concat->subdev[i];
+		size_t size, wsize, retsize, old_iov_len;
+
+		if (to >= subdev->size) {
+			to -= subdev->size;
+			continue;
+		}
+
+		size = min_t(uint64_t, total_len, subdev->size - to);
+		wsize = size; /* store for future use */
+
+		entry_high = entry_low;
+		while (entry_high < count) {
+			if (size <= vecs_copy[entry_high].iov_len)
+				break;
+			size -= vecs_copy[entry_high++].iov_len;
+		}
+
+		old_iov_len = vecs_copy[entry_high].iov_len;
+		vecs_copy[entry_high].iov_len = size;
+
+		err = mtd_writev(subdev, &vecs_copy[entry_low],
+				 entry_high - entry_low + 1, to, &retsize);
+
+		vecs_copy[entry_high].iov_len = old_iov_len - size;
+		vecs_copy[entry_high].iov_base += size;
+
+		entry_low = entry_high;
+
+		if (err)
+			break;
+
+		*retlen += retsize;
+		total_len -= wsize;
+
+		if (total_len == 0)
+			break;
+
+		err = -EINVAL;
+		to = 0;
+	}
+
+	kfree(vecs_copy);
+	return err;
+}
+#endif
+
 static int
 concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
 {
@@ -204,7 +301,7 @@
 	if (!(mtd->flags & MTD_WRITEABLE))
 		return -EROFS;
 
-	ops->retlen = 0;
+	ops->retlen = ops->oobretlen = 0;
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
@@ -219,7 +316,7 @@
 			devops.len = subdev->size - to;
 
 		err = mtd_write_oob(subdev, to, &devops);
-		ops->retlen += devops.retlen;
+		ops->retlen += devops.oobretlen;
 		if (err)
 			return err;
 
@@ -243,6 +340,9 @@
 static void concat_erase_callback(struct erase_info *instr)
 {
 	/* Nothing to do here in U-Boot */
+#ifndef __UBOOT__
+	wake_up((wait_queue_head_t *) instr->priv);
+#endif
 }
 
 static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
@@ -316,7 +416,7 @@
 		 * to-be-erased area begins. Verify that the starting
 		 * offset is aligned to this region's erase size:
 		 */
-		if (instr->addr & (erase_regions[i].erasesize - 1))
+		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
 			return -EINVAL;
 
 		/*
@@ -329,8 +429,8 @@
 		/*
 		 * check if the ending offset is aligned to this region's erase size
 		 */
-		if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
-						  1))
+		if (i < 0 || ((instr->addr + instr->len) &
+					(erase_regions[i].erasesize - 1)))
 			return -EINVAL;
 	}
 
@@ -422,7 +522,6 @@
 			size = len;
 
 		err = mtd_lock(subdev, ofs, size);
-
 		if (err)
 			break;
 
@@ -457,7 +556,6 @@
 			size = len;
 
 		err = mtd_unlock(subdev, ofs, size);
-
 		if (err)
 			break;
 
@@ -483,6 +581,32 @@
 	}
 }
 
+#ifndef __UBOOT__
+static int concat_suspend(struct mtd_info *mtd)
+{
+	struct mtd_concat *concat = CONCAT(mtd);
+	int i, rc = 0;
+
+	for (i = 0; i < concat->num_subdev; i++) {
+		struct mtd_info *subdev = concat->subdev[i];
+		if ((rc = mtd_suspend(subdev)) < 0)
+			return rc;
+	}
+	return rc;
+}
+
+static void concat_resume(struct mtd_info *mtd)
+{
+	struct mtd_concat *concat = CONCAT(mtd);
+	int i;
+
+	for (i = 0; i < concat->num_subdev; i++) {
+		struct mtd_info *subdev = concat->subdev[i];
+		mtd_resume(subdev);
+	}
+}
+#endif
+
 static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
@@ -511,9 +635,6 @@
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
 
-	if (!mtd_can_have_bb(concat->subdev[0]))
-		return 0;
-
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 
@@ -532,6 +653,32 @@
 }
 
 /*
+ * try to support NOMMU mmaps on concatenated devices
+ * - we don't support subdev spanning as we can't guarantee it'll work
+ */
+static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
+					      unsigned long len,
+					      unsigned long offset,
+					      unsigned long flags)
+{
+	struct mtd_concat *concat = CONCAT(mtd);
+	int i;
+
+	for (i = 0; i < concat->num_subdev; i++) {
+		struct mtd_info *subdev = concat->subdev[i];
+
+		if (offset >= subdev->size) {
+			offset -= subdev->size;
+			continue;
+		}
+
+		return mtd_get_unmapped_area(subdev, len, offset, flags);
+	}
+
+	return (unsigned long) -ENOSYS;
+}
+
+/*
  * This function constructs a virtual MTD device by concatenating
  * num_devs MTD devices. A pointer to the new device object is
  * stored to *new_dev upon success. This function does _not_
@@ -539,17 +686,22 @@
  */
 struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
 				   int num_devs,	/* number of subdevices      */
+#ifndef __UBOOT__
 				   const char *name)
+#else
+				   char *name)
+#endif
 {				/* name for the new device   */
 	int i;
 	size_t size;
 	struct mtd_concat *concat;
 	uint32_t max_erasesize, curr_erasesize;
 	int num_erase_region;
+	int max_writebufsize = 0;
 
 	debug("Concatenating MTD devices:\n");
 	for (i = 0; i < num_devs; i++)
-		debug("(%d): \"%s\"\n", i, subdev[i]->name);
+		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
 	debug("into device \"%s\"\n", name);
 
 	/* allocate the device structure */
@@ -565,16 +717,26 @@
 
 	/*
 	 * Set up the new "super" device's MTD object structure, check for
-	 * incompatibilites between the subdevices.
+	 * incompatibilities between the subdevices.
 	 */
 	concat->mtd.type = subdev[0]->type;
 	concat->mtd.flags = subdev[0]->flags;
 	concat->mtd.size = subdev[0]->size;
 	concat->mtd.erasesize = subdev[0]->erasesize;
 	concat->mtd.writesize = subdev[0]->writesize;
+
+	for (i = 0; i < num_devs; i++)
+		if (max_writebufsize < subdev[i]->writebufsize)
+			max_writebufsize = subdev[i]->writebufsize;
+	concat->mtd.writebufsize = max_writebufsize;
+
 	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
 	concat->mtd.oobsize = subdev[0]->oobsize;
 	concat->mtd.oobavail = subdev[0]->oobavail;
+#ifndef __UBOOT__
+	if (subdev[0]->_writev)
+		concat->mtd._writev = concat_writev;
+#endif
 	if (subdev[0]->_read_oob)
 		concat->mtd._read_oob = concat_read_oob;
 	if (subdev[0]->_write_oob)
@@ -586,6 +748,10 @@
 
 	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
 
+#ifndef __UBOOT__
+	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
+#endif
+
 	concat->subdev[0] = subdev[0];
 
 	for (i = 1; i < num_devs; i++) {
@@ -613,6 +779,16 @@
 				    subdev[i]->flags & MTD_WRITEABLE;
 		}
 
+#ifndef __UBOOT__
+		/* only permit direct mapping if the BDIs are all the same
+		 * - copy-mapping is still permitted
+		 */
+		if (concat->mtd.backing_dev_info !=
+		    subdev[i]->backing_dev_info)
+			concat->mtd.backing_dev_info =
+				&default_backing_dev_info;
+#endif
+
 		concat->mtd.size += subdev[i]->size;
 		concat->mtd.ecc_stats.badblocks +=
 			subdev[i]->ecc_stats.badblocks;
@@ -641,6 +817,11 @@
 	concat->mtd._sync = concat_sync;
 	concat->mtd._lock = concat_lock;
 	concat->mtd._unlock = concat_unlock;
+#ifndef __UBOOT__
+	concat->mtd._suspend = concat_suspend;
+	concat->mtd._resume = concat_resume;
+#endif
+	concat->mtd._get_unmapped_area = concat_get_unmapped_area;
 
 	/*
 	 * Combine the erase block size info of the subdevices:
@@ -771,3 +952,22 @@
 
 	return &concat->mtd;
 }
+
+/*
+ * This function destroys an MTD object obtained from concat_mtd_devs()
+ */
+
+void mtd_concat_destroy(struct mtd_info *mtd)
+{
+	struct mtd_concat *concat = CONCAT(mtd);
+	if (concat->mtd.numeraseregions)
+		kfree(concat->mtd.eraseregions);
+	kfree(concat);
+}
+
+EXPORT_SYMBOL(mtd_concat_create);
+EXPORT_SYMBOL(mtd_concat_destroy);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
+MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 0a38fbe..796ac07 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -2,130 +2,767 @@
  * Core registration and callback routines for MTD
  * drivers and users.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2006      Red Hat UK Limited 
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ *
  */
 
-#include <linux/mtd/mtd.h>
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/ioctl.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/idr.h>
+#include <linux/backing-dev.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#else
 #include <linux/compat.h>
+#include <linux/err.h>
 #include <ubi_uboot.h>
+#endif
 
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include "mtdcore.h"
+
+#ifndef __UBOOT__
+/*
+ * backing device capabilities for non-mappable devices (such as NAND flash)
+ * - permits private mappings, copies are taken of the data
+ */
+static struct backing_dev_info mtd_bdi_unmappable = {
+	.capabilities	= BDI_CAP_MAP_COPY,
+};
+
+/*
+ * backing device capabilities for R/O mappable devices (such as ROM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+static struct backing_dev_info mtd_bdi_ro_mappable = {
+	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
+};
+
+/*
+ * backing device capabilities for writable mappable devices (such as RAM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+static struct backing_dev_info mtd_bdi_rw_mappable = {
+	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
+			   BDI_CAP_WRITE_MAP),
+};
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
+
+static struct class mtd_class = {
+	.name = "mtd",
+	.owner = THIS_MODULE,
+	.suspend = mtd_cls_suspend,
+	.resume = mtd_cls_resume,
+};
+#else
 struct mtd_info *mtd_table[MAX_MTD_DEVICES];
 
+#define MAX_IDR_ID	64
+
+struct idr_layer {
+	int	used;
+	void	*ptr;
+};
+
+struct idr {
+	struct idr_layer id[MAX_IDR_ID];
+};
+
+#define DEFINE_IDR(name)	struct idr name;
+
+void idr_remove(struct idr *idp, int id)
+{
+	if (idp->id[id].used)
+		idp->id[id].used = 0;
+
+	return;
+}
+void *idr_find(struct idr *idp, int id)
+{
+	if (idp->id[id].used)
+		return idp->id[id].ptr;
+
+	return NULL;
+}
+
+void *idr_get_next(struct idr *idp, int *next)
+{
+	void *ret;
+	int id = *next;
+
+	ret = idr_find(idp, id);
+	if (ret) {
+		id++;
+		if (!idp->id[id].used)
+			id = 0;
+		*next = id;
+	} else {
+		*next = 0;
+	}
+
+	return ret;
+}
+
+int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+	struct idr_layer *idl;
+	int i = 0;
+
+	while (i < MAX_IDR_ID) {
+		idl = &idp->id[i];
+		if (idl->used == 0) {
+			idl->used = 1;
+			idl->ptr = ptr;
+			return i;
+		}
+		i++;
+	}
+	return -ENOSPC;
+}
+#endif
+
+static DEFINE_IDR(mtd_idr);
+
+/* These are exported solely for the purpose of mtd_blkdevs.c. You
+   should not use them for _anything_ else */
+DEFINE_MUTEX(mtd_table_mutex);
+EXPORT_SYMBOL_GPL(mtd_table_mutex);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+	return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
+
+#ifndef __UBOOT__
+static LIST_HEAD(mtd_notifiers);
+
+
+#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
+
+/* REVISIT once MTD uses the driver model better, whoever allocates
+ * the mtd_info will probably want to use the release() hook...
+ */
+static void mtd_release(struct device *dev)
+{
+	struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
+	dev_t index = MTD_DEVT(mtd->index);
+
+	/* remove /dev/mtdXro node if needed */
+	if (index)
+		device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return mtd ? mtd_suspend(mtd) : 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	if (mtd)
+		mtd_resume(mtd);
+	return 0;
+}
+
+static ssize_t mtd_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	char *type;
+
+	switch (mtd->type) {
+	case MTD_ABSENT:
+		type = "absent";
+		break;
+	case MTD_RAM:
+		type = "ram";
+		break;
+	case MTD_ROM:
+		type = "rom";
+		break;
+	case MTD_NORFLASH:
+		type = "nor";
+		break;
+	case MTD_NANDFLASH:
+		type = "nand";
+		break;
+	case MTD_DATAFLASH:
+		type = "dataflash";
+		break;
+	case MTD_UBIVOLUME:
+		type = "ubi";
+		break;
+	case MTD_MLCNANDFLASH:
+		type = "mlc-nand";
+		break;
+	default:
+		type = "unknown";
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", type);
+}
+static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
+
+static ssize_t mtd_flags_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
+
+}
+static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
+
+static ssize_t mtd_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)mtd->size);
+
+}
+static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
+
+static ssize_t mtd_erasesize_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
+
+}
+static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
+
+static ssize_t mtd_writesize_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
+
+}
+static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
+
+static ssize_t mtd_subpagesize_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
+
+}
+static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
+
+static ssize_t mtd_oobsize_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
+
+}
+static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
+
+static ssize_t mtd_numeraseregions_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
+
+}
+static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
+	NULL);
+
+static ssize_t mtd_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
+
+}
+static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
+
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	unsigned int bitflip_threshold;
+	int retval;
+
+	retval = kstrtouint(buf, 0, &bitflip_threshold);
+	if (retval)
+		return retval;
+
+	mtd->bitflip_threshold = bitflip_threshold;
+	return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+		   mtd_bitflip_threshold_show,
+		   mtd_bitflip_threshold_store);
+
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+
+}
+static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+
+static struct attribute *mtd_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_flags.attr,
+	&dev_attr_size.attr,
+	&dev_attr_erasesize.attr,
+	&dev_attr_writesize.attr,
+	&dev_attr_subpagesize.attr,
+	&dev_attr_oobsize.attr,
+	&dev_attr_numeraseregions.attr,
+	&dev_attr_name.attr,
+	&dev_attr_ecc_strength.attr,
+	&dev_attr_ecc_step_size.attr,
+	&dev_attr_bitflip_threshold.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(mtd);
+
+static struct device_type mtd_devtype = {
+	.name		= "mtd",
+	.groups		= mtd_groups,
+	.release	= mtd_release,
+};
+#endif
+
+/**
+ *	add_mtd_device - register an MTD device
+ *	@mtd: pointer to new MTD device info structure
+ *
+ *	Add a device to the list of MTD devices present in the system, and
+ *	notify each currently active MTD 'user' of its arrival. Returns
+ *	zero on success or 1 on failure, which currently will only happen
+ *	if there is insufficient memory or a sysfs error.
+ */
+
 int add_mtd_device(struct mtd_info *mtd)
 {
-	int i;
+#ifndef __UBOOT__
+	struct mtd_notifier *not;
+#endif
+	int i, error;
+
+#ifndef __UBOOT__
+	if (!mtd->backing_dev_info) {
+		switch (mtd->type) {
+		case MTD_RAM:
+			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
+			break;
+		case MTD_ROM:
+			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
+			break;
+		default:
+			mtd->backing_dev_info = &mtd_bdi_unmappable;
+			break;
+		}
+	}
+#endif
 
 	BUG_ON(mtd->writesize == 0);
+	mutex_lock(&mtd_table_mutex);
 
-	for (i = 0; i < MAX_MTD_DEVICES; i++)
-		if (!mtd_table[i]) {
-			mtd_table[i] = mtd;
-			mtd->index = i;
-			mtd->usecount = 0;
+	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+	if (i < 0)
+		goto fail_locked;
 
-			/* default value if not set by driver */
-			if (mtd->bitflip_threshold == 0)
-				mtd->bitflip_threshold = mtd->ecc_strength;
+	mtd->index = i;
+	mtd->usecount = 0;
 
+	/* default value if not set by driver */
+	if (mtd->bitflip_threshold == 0)
+		mtd->bitflip_threshold = mtd->ecc_strength;
 
-			/* No need to get a refcount on the module containing
-			   the notifier, since we hold the mtd_table_mutex */
+	if (is_power_of_2(mtd->erasesize))
+		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+	else
+		mtd->erasesize_shift = 0;
 
-			/* We _know_ we aren't being removed, because
-			   our caller is still holding us here. So none
-			   of this try_ nonsense, and no bitching about it
-			   either. :) */
-			return 0;
-		}
+	if (is_power_of_2(mtd->writesize))
+		mtd->writesize_shift = ffs(mtd->writesize) - 1;
+	else
+		mtd->writesize_shift = 0;
 
+	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+	/* Some chips always power up locked. Unlock them now */
+	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+		error = mtd_unlock(mtd, 0, mtd->size);
+		if (error && error != -EOPNOTSUPP)
+			printk(KERN_WARNING
+			       "%s: unlock failed, writes may not work\n",
+			       mtd->name);
+	}
+
+#ifndef __UBOOT__
+	/* Caller should have set dev.parent to match the
+	 * physical device.
+	 */
+	mtd->dev.type = &mtd_devtype;
+	mtd->dev.class = &mtd_class;
+	mtd->dev.devt = MTD_DEVT(i);
+	dev_set_name(&mtd->dev, "mtd%d", i);
+	dev_set_drvdata(&mtd->dev, mtd);
+	if (device_register(&mtd->dev) != 0)
+		goto fail_added;
+
+	if (MTD_DEVT(i))
+		device_create(&mtd_class, mtd->dev.parent,
+			      MTD_DEVT(i) + 1,
+			      NULL, "mtd%dro", i);
+
+	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
+	/* No need to get a refcount on the module containing
+	   the notifier, since we hold the mtd_table_mutex */
+	list_for_each_entry(not, &mtd_notifiers, list)
+		not->add(mtd);
+#endif
+
+	mutex_unlock(&mtd_table_mutex);
+	/* We _know_ we aren't being removed, because
+	   our caller is still holding us here. So none
+	   of this try_ nonsense, and no bitching about it
+	   either. :) */
+	__module_get(THIS_MODULE);
+	return 0;
+
+#ifndef __UBOOT__
+fail_added:
+	idr_remove(&mtd_idr, i);
+#endif
+fail_locked:
+	mutex_unlock(&mtd_table_mutex);
 	return 1;
 }
 
 /**
- *      del_mtd_device - unregister an MTD device
- *      @mtd: pointer to MTD device info structure
+ *	del_mtd_device - unregister an MTD device
+ *	@mtd: pointer to MTD device info structure
  *
- *      Remove a device from the list of MTD devices present in the system,
- *      and notify each currently active MTD 'user' of its departure.
- *      Returns zero on success or 1 on failure, which currently will happen
- *      if the requested device does not appear to be present in the list.
+ *	Remove a device from the list of MTD devices present in the system,
+ *	and notify each currently active MTD 'user' of its departure.
+ *	Returns zero on success or 1 on failure, which currently will happen
+ *	if the requested device does not appear to be present in the list.
  */
+
 int del_mtd_device(struct mtd_info *mtd)
 {
 	int ret;
+#ifndef __UBOOT__
+	struct mtd_notifier *not;
+#endif
 
-	if (mtd_table[mtd->index] != mtd) {
+	mutex_lock(&mtd_table_mutex);
+
+	if (idr_find(&mtd_idr, mtd->index) != mtd) {
 		ret = -ENODEV;
-	} else if (mtd->usecount) {
-		printk(KERN_NOTICE "Removing MTD device #%d (%s)"
-				" with use count %d\n",
-				mtd->index, mtd->name, mtd->usecount);
+		goto out_error;
+	}
+
+#ifndef __UBOOT__
+	/* No need to get a refcount on the module containing
+		the notifier, since we hold the mtd_table_mutex */
+	list_for_each_entry(not, &mtd_notifiers, list)
+		not->remove(mtd);
+#endif
+
+	if (mtd->usecount) {
+		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
+		       mtd->index, mtd->name, mtd->usecount);
 		ret = -EBUSY;
 	} else {
-		/* No need to get a refcount on the module containing
-		 * the notifier, since we hold the mtd_table_mutex */
-		mtd_table[mtd->index] = NULL;
+#ifndef __UBOOT__
+		device_unregister(&mtd->dev);
+#endif
 
+		idr_remove(&mtd_idr, mtd->index);
+
+		module_put(THIS_MODULE);
 		ret = 0;
 	}
 
+out_error:
+	mutex_unlock(&mtd_table_mutex);
 	return ret;
 }
 
+#ifndef __UBOOT__
+/**
+ * mtd_device_parse_register - parse partitions and register an MTD device.
+ *
+ * @mtd: the MTD device to register
+ * @types: the list of MTD partition probes to try, see
+ *         'parse_mtd_partitions()' for more information
+ * @parser_data: MTD partition parser-specific data
+ * @parts: fallback partition information to register, if parsing fails;
+ *         only valid if %nr_parts > %0
+ * @nr_parts: the number of partitions in parts, if zero then the full
+ *            MTD device is registered if no partition info is found
+ *
+ * This function aggregates MTD partitions parsing (done by
+ * 'parse_mtd_partitions()') and MTD device and partitions registering. It
+ * basically follows the most common pattern found in many MTD drivers:
+ *
+ * * It first tries to probe partitions on MTD device @mtd using parsers
+ *   specified in @types (if @types is %NULL, then the default list of parsers
+ *   is used, see 'parse_mtd_partitions()' for more information). If none are
+ *   found this functions tries to fallback to information specified in
+ *   @parts/@nr_parts.
+ * * If any partitioning info was found, this function registers the found
+ *   partitions.
+ * * If no partitions were found this function just registers the MTD device
+ *   @mtd and exits.
+ *
+ * Returns zero in case of success and a negative error code in case of failure.
+ */
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+			      struct mtd_part_parser_data *parser_data,
+			      const struct mtd_partition *parts,
+			      int nr_parts)
+{
+	int err;
+	struct mtd_partition *real_parts;
+
+	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+	if (err <= 0 && nr_parts && parts) {
+		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
+				     GFP_KERNEL);
+		if (!real_parts)
+			err = -ENOMEM;
+		else
+			err = nr_parts;
+	}
+
+	if (err > 0) {
+		err = add_mtd_partitions(mtd, real_parts, err);
+		kfree(real_parts);
+	} else if (err == 0) {
+		err = add_mtd_device(mtd);
+		if (err == 1)
+			err = -ENODEV;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mtd_device_parse_register);
+
+/**
+ * mtd_device_unregister - unregister an existing MTD device.
+ *
+ * @master: the MTD device to unregister.  This will unregister both the master
+ *          and any partitions if registered.
+ */
+int mtd_device_unregister(struct mtd_info *master)
+{
+	int err;
+
+	err = del_mtd_partitions(master);
+	if (err)
+		return err;
+
+	if (!device_is_registered(&master->dev))
+		return 0;
+
+	return del_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_unregister);
+
+/**
+ *	register_mtd_user - register a 'user' of MTD devices.
+ *	@new: pointer to notifier info structure
+ *
+ *	Registers a pair of callbacks function to be called upon addition
+ *	or removal of MTD devices. Causes the 'add' callback to be immediately
+ *	invoked for each MTD device currently present in the system.
+ */
+void register_mtd_user (struct mtd_notifier *new)
+{
+	struct mtd_info *mtd;
+
+	mutex_lock(&mtd_table_mutex);
+
+	list_add(&new->list, &mtd_notifiers);
+
+	__module_get(THIS_MODULE);
+
+	mtd_for_each_device(mtd)
+		new->add(mtd);
+
+	mutex_unlock(&mtd_table_mutex);
+}
+EXPORT_SYMBOL_GPL(register_mtd_user);
+
+/**
+ *	unregister_mtd_user - unregister a 'user' of MTD devices.
+ *	@old: pointer to notifier info structure
+ *
+ *	Removes a callback function pair from the list of 'users' to be
+ *	notified upon addition or removal of MTD devices. Causes the
+ *	'remove' callback to be immediately invoked for each MTD device
+ *	currently present in the system.
+ */
+int unregister_mtd_user (struct mtd_notifier *old)
+{
+	struct mtd_info *mtd;
+
+	mutex_lock(&mtd_table_mutex);
+
+	module_put(THIS_MODULE);
+
+	mtd_for_each_device(mtd)
+		old->remove(mtd);
+
+	list_del(&old->list);
+	mutex_unlock(&mtd_table_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
+#endif
+
 /**
  *	get_mtd_device - obtain a validated handle for an MTD device
  *	@mtd: last known address of the required MTD device
  *	@num: internal device number of the required MTD device
  *
  *	Given a number and NULL address, return the num'th entry in the device
- *      table, if any.  Given an address and num == -1, search the device table
- *      for a device with that address and return if it's still present. Given
- *      both, return the num'th driver only if its address matches. Return
- *      error code if not.
+ *	table, if any.	Given an address and num == -1, search the device table
+ *	for a device with that address and return if it's still present. Given
+ *	both, return the num'th driver only if its address matches. Return
+ *	error code if not.
  */
 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
 {
-	struct mtd_info *ret = NULL;
-	int i, err = -ENODEV;
+	struct mtd_info *ret = NULL, *other;
+	int err = -ENODEV;
+
+	mutex_lock(&mtd_table_mutex);
 
 	if (num == -1) {
-		for (i = 0; i < MAX_MTD_DEVICES; i++)
-			if (mtd_table[i] == mtd)
-				ret = mtd_table[i];
-	} else if (num < MAX_MTD_DEVICES) {
-		ret = mtd_table[num];
+		mtd_for_each_device(other) {
+			if (other == mtd) {
+				ret = mtd;
+				break;
+			}
+		}
+	} else if (num >= 0) {
+		ret = idr_find(&mtd_idr, num);
 		if (mtd && mtd != ret)
 			ret = NULL;
 	}
 
-	if (!ret)
-		goto out_unlock;
+	if (!ret) {
+		ret = ERR_PTR(err);
+		goto out;
+	}
 
-	ret->usecount++;
+	err = __get_mtd_device(ret);
+	if (err)
+		ret = ERR_PTR(err);
+out:
+	mutex_unlock(&mtd_table_mutex);
 	return ret;
-
-out_unlock:
-	return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(get_mtd_device);
+
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+	int err;
+
+	if (!try_module_get(mtd->owner))
+		return -ENODEV;
+
+	if (mtd->_get_device) {
+		err = mtd->_get_device(mtd);
+
+		if (err) {
+			module_put(mtd->owner);
+			return err;
+		}
+	}
+	mtd->usecount++;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
 
 /**
- *      get_mtd_device_nm - obtain a validated handle for an MTD device by
- *      device name
- *      @name: MTD device name to open
+ *	get_mtd_device_nm - obtain a validated handle for an MTD device by
+ *	device name
+ *	@name: MTD device name to open
  *
- *      This function returns MTD device description structure in case of
- *      success and an error code in case of failure.
+ * 	This function returns MTD device description structure in case of
+ * 	success and an error code in case of failure.
  */
 struct mtd_info *get_mtd_device_nm(const char *name)
 {
-	int i, err = -ENODEV;
-	struct mtd_info *mtd = NULL;
+	int err = -ENODEV;
+	struct mtd_info *mtd = NULL, *other;
 
-	for (i = 0; i < MAX_MTD_DEVICES; i++) {
-		if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
-			mtd = mtd_table[i];
+	mutex_lock(&mtd_table_mutex);
+
+	mtd_for_each_device(other) {
+		if (!strcmp(name, other->name)) {
+			mtd = other;
 			break;
 		}
 	}
@@ -133,20 +770,18 @@
 	if (!mtd)
 		goto out_unlock;
 
-	mtd->usecount++;
+	err = __get_mtd_device(mtd);
+	if (err)
+		goto out_unlock;
+
+	mutex_unlock(&mtd_table_mutex);
 	return mtd;
 
 out_unlock:
+	mutex_unlock(&mtd_table_mutex);
 	return ERR_PTR(err);
 }
-
-void put_mtd_device(struct mtd_info *mtd)
-{
-	int c;
-
-	c = --mtd->usecount;
-	BUG_ON(c < 0);
-}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
 
 #if defined(CONFIG_CMD_MTDPARTS_SPREAD)
 /**
@@ -192,7 +827,28 @@
 }
 #endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */
 
- /*
+void put_mtd_device(struct mtd_info *mtd)
+{
+	mutex_lock(&mtd_table_mutex);
+	__put_mtd_device(mtd);
+	mutex_unlock(&mtd_table_mutex);
+
+}
+EXPORT_SYMBOL_GPL(put_mtd_device);
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+	--mtd->usecount;
+	BUG_ON(mtd->usecount < 0);
+
+	if (mtd->_put_device)
+		mtd->_put_device(mtd);
+
+	module_put(mtd->owner);
+}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
+
+/*
  * Erase is an asynchronous operation.  Device drivers are supposed
  * to call instr->callback() whenever the operation completes, even
  * if it completes with a failure.
@@ -213,11 +869,64 @@
 	}
 	return mtd->_erase(mtd, instr);
 }
+EXPORT_SYMBOL_GPL(mtd_erase);
+
+#ifndef __UBOOT__
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	      void **virt, resource_size_t *phys)
+{
+	*retlen = 0;
+	*virt = NULL;
+	if (phys)
+		*phys = 0;
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+#endif
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+				    unsigned long offset, unsigned long flags)
+{
+	if (!mtd->_get_unmapped_area)
+		return -EOPNOTSUPP;
+	if (offset > mtd->size || len > mtd->size - offset)
+		return -EINVAL;
+	return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
 
 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
 	     u_char *buf)
 {
 	int ret_code;
+	*retlen = 0;
 	if (from < 0 || from > mtd->size || len > mtd->size - from)
 		return -EINVAL;
 	if (!len)
@@ -235,6 +944,7 @@
 		return 0;	/* device lacks ecc */
 	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
 }
+EXPORT_SYMBOL_GPL(mtd_read);
 
 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
 	      const u_char *buf)
@@ -248,6 +958,7 @@
 		return 0;
 	return mtd->_write(mtd, to, len, retlen, buf);
 }
+EXPORT_SYMBOL_GPL(mtd_write);
 
 /*
  * In blackbox flight recorder like scenarios we want to make successful writes
@@ -270,14 +981,28 @@
 		return 0;
 	return mtd->_panic_write(mtd, to, len, retlen, buf);
 }
+EXPORT_SYMBOL_GPL(mtd_panic_write);
 
 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
 {
+	int ret_code;
 	ops->retlen = ops->oobretlen = 0;
 	if (!mtd->_read_oob)
 		return -EOPNOTSUPP;
-	return mtd->_read_oob(mtd, from, ops);
+	/*
+	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+	 * similar to mtd->_read(), returning a non-negative integer
+	 * representing max bitflips. In other cases, mtd->_read_oob() may
+	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+	 */
+	ret_code = mtd->_read_oob(mtd, from, ops);
+	if (unlikely(ret_code < 0))
+		return ret_code;
+	if (mtd->ecc_strength == 0)
+		return 0;	/* device lacks ecc */
+	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
 }
+EXPORT_SYMBOL_GPL(mtd_read_oob);
 
 /*
  * Method to access the protection register area, present in some flash
@@ -293,6 +1018,7 @@
 		return 0;
 	return mtd->_get_fact_prot_info(mtd, buf, len);
 }
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
 
 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
 			   size_t *retlen, u_char *buf)
@@ -304,6 +1030,7 @@
 		return 0;
 	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
 }
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
 
 int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
 			   size_t len)
@@ -314,6 +1041,7 @@
 		return 0;
 	return mtd->_get_user_prot_info(mtd, buf, len);
 }
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
 
 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
 			   size_t *retlen, u_char *buf)
@@ -325,6 +1053,7 @@
 		return 0;
 	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
 }
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
 
 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
 			    size_t *retlen, u_char *buf)
@@ -336,6 +1065,7 @@
 		return 0;
 	return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
 }
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
 
 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
 {
@@ -345,6 +1075,7 @@
 		return 0;
 	return mtd->_lock_user_prot_reg(mtd, from, len);
 }
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
 
 /* Chip-supported device locking */
 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -357,6 +1088,7 @@
 		return 0;
 	return mtd->_lock(mtd, ofs, len);
 }
+EXPORT_SYMBOL_GPL(mtd_lock);
 
 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
@@ -368,6 +1100,19 @@
 		return 0;
 	return mtd->_unlock(mtd, ofs, len);
 }
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_is_locked)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
 
 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
@@ -377,6 +1122,7 @@
 		return -EINVAL;
 	return mtd->_block_isbad(mtd, ofs);
 }
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
 
 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
@@ -388,3 +1134,225 @@
 		return -EROFS;
 	return mtd->_block_markbad(mtd, ofs);
 }
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
+
+#ifndef __UBOOT__
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+			      unsigned long count, loff_t to, size_t *retlen)
+{
+	unsigned long i;
+	size_t totlen = 0, thislen;
+	int ret = 0;
+
+	for (i = 0; i < count; i++) {
+		if (!vecs[i].iov_len)
+			continue;
+		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+				vecs[i].iov_base);
+		totlen += thislen;
+		if (ret || thislen != vecs[i].iov_len)
+			break;
+		to += vecs[i].iov_len;
+	}
+	*retlen = totlen;
+	return ret;
+}
+
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+	       unsigned long count, loff_t to, size_t *retlen)
+{
+	*retlen = 0;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (!mtd->_writev)
+		return default_mtd_writev(mtd, vecs, count, to, retlen);
+	return mtd->_writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
+/**
+ * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
+ *        to the actual allocation size on success.
+ *
+ * This routine attempts to allocate a contiguous kernel buffer up to
+ * the specified size, backing off the size of the request exponentially
+ * until the request succeeds or until the allocation size falls below
+ * the system page size. This attempts to make sure it does not adversely
+ * impact system performance, so when allocating more than one page, we
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
+ *
+ * Note, this function also makes sure that the allocated buffer is aligned to
+ * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
+ *
+ * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
+ * to handle smaller (i.e. degraded) buffer allocations under low- or
+ * fragmented-memory situations where such reduced allocations, from a
+ * requested ideal, are allowed.
+ *
+ * Returns a pointer to the allocated buffer on success; otherwise, NULL.
+ */
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
+{
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+		       __GFP_NORETRY | __GFP_NO_KSWAPD;
+	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
+	void *kbuf;
+
+	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
+
+	while (*size > min_alloc) {
+		kbuf = kmalloc(*size, flags);
+		if (kbuf)
+			return kbuf;
+
+		*size >>= 1;
+		*size = ALIGN(*size, mtd->writesize);
+	}
+
+	/*
+	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
+	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
+	 */
+	return kmalloc(*size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
+#endif
+
+#ifdef CONFIG_PROC_FS
+
+/*====================================================================*/
+/* Support for /proc/mtd */
+
+static int mtd_proc_show(struct seq_file *m, void *v)
+{
+	struct mtd_info *mtd;
+
+	seq_puts(m, "dev:    size   erasesize  name\n");
+	mutex_lock(&mtd_table_mutex);
+	mtd_for_each_device(mtd) {
+		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
+			   mtd->index, (unsigned long long)mtd->size,
+			   mtd->erasesize, mtd->name);
+	}
+	mutex_unlock(&mtd_table_mutex);
+	return 0;
+}
+
+static int mtd_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtd_proc_show, NULL);
+}
+
+static const struct file_operations mtd_proc_ops = {
+	.open		= mtd_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+/*====================================================================*/
+/* Init code */
+
+#ifndef __UBOOT__
+static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+{
+	int ret;
+
+	ret = bdi_init(bdi);
+	if (!ret)
+		ret = bdi_register(bdi, NULL, "%s", name);
+
+	if (ret)
+		bdi_destroy(bdi);
+
+	return ret;
+}
+
+static struct proc_dir_entry *proc_mtd;
+
+static int __init init_mtd(void)
+{
+	int ret;
+
+	ret = class_register(&mtd_class);
+	if (ret)
+		goto err_reg;
+
+	ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
+	if (ret)
+		goto err_bdi1;
+
+	ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
+	if (ret)
+		goto err_bdi2;
+
+	ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
+	if (ret)
+		goto err_bdi3;
+
+	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
+
+	ret = init_mtdchar();
+	if (ret)
+		goto out_procfs;
+
+	return 0;
+
+out_procfs:
+	if (proc_mtd)
+		remove_proc_entry("mtd", NULL);
+err_bdi3:
+	bdi_destroy(&mtd_bdi_ro_mappable);
+err_bdi2:
+	bdi_destroy(&mtd_bdi_unmappable);
+err_bdi1:
+	class_unregister(&mtd_class);
+err_reg:
+	pr_err("Error registering mtd class or bdi: %d\n", ret);
+	return ret;
+}
+
+static void __exit cleanup_mtd(void)
+{
+	cleanup_mtdchar();
+	if (proc_mtd)
+		remove_proc_entry("mtd", NULL);
+	class_unregister(&mtd_class);
+	bdi_destroy(&mtd_bdi_unmappable);
+	bdi_destroy(&mtd_bdi_ro_mappable);
+	bdi_destroy(&mtd_bdi_rw_mappable);
+}
+
+module_init(init_mtd);
+module_exit(cleanup_mtd);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core MTD registration and access routines");
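
Note on the U-Boot-only idr shim above: it replaces the kernel's idr
allocator with a fixed-size table of MAX_IDR_ID slots, and idr_alloc()
ignores its start/end/gfp_mask arguments. A short illustrative sketch of
the shim's contract (hypothetical user, not part of the patch):

	/* hypothetical user of the idr shim from mtdcore.c */
	static DEFINE_IDR(example_idr);

	static int example_register(struct mtd_info *mtd)
	{
		int id = idr_alloc(&example_idr, mtd, 0, 0, GFP_KERNEL);

		if (id < 0)
			return id;	/* -ENOSPC once all slots are used */

		if (idr_find(&example_idr, id) == mtd)	/* slot holds our pointer */
			idr_remove(&example_idr, id);	/* free the slot again */

		return 0;
	}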
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
new file mode 100644
index 0000000..7b03533
--- /dev/null
+++ b/drivers/mtd/mtdcore.h
@@ -0,0 +1,23 @@
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
+ */
+
+extern struct mutex mtd_table_mutex;
+
+struct mtd_info *__mtd_next_device(int i);
+int add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+			 struct mtd_partition **pparts,
+			 struct mtd_part_parser_data *data);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
+
+#define mtd_for_each_device(mtd)			\
+	for ((mtd) = __mtd_next_device(0);		\
+	     (mtd) != NULL;				\
+	     (mtd) = __mtd_next_device(mtd->index + 1))
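
Note: mtd_for_each_device() walks the idr-backed device table via
__mtd_next_device(); as in mtd_proc_show() above, callers are expected
to hold mtd_table_mutex across the walk. A hypothetical caller (not part
of the patch):

	/* count the registered MTD devices, illustrative only */
	static int count_mtd_devices(void)
	{
		struct mtd_info *mtd;
		int count = 0;

		mutex_lock(&mtd_table_mutex);
		mtd_for_each_device(mtd)
			count++;
		mutex_unlock(&mtd_table_mutex);

		return count;
	}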
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 146ce11..d20b857 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -1,35 +1,50 @@
 /*
  * Simple MTD partitioning layer
  *
- * (C) 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
  *
- * This code is GPL
+ * SPDX-License-Identifier:	GPL-2.0+
  *
- * 	02-21-2002	Thomas Gleixner <gleixner@autronix.de>
- *			added support for read_oob, write_oob
  */
 
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kmod.h>
+#endif
+
 #include <common.h>
 #include <malloc.h>
 #include <asm/errno.h>
+#include <linux/compat.h>
+#include <ubi_uboot.h>
 
-#include <linux/types.h>
-#include <linux/list.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
-#include <linux/compat.h>
+#include <linux/err.h>
+
+#include "mtdcore.h"
 
 /* Our partition linked list */
-struct list_head mtd_partitions;
+static LIST_HEAD(mtd_partitions);
+#ifndef __UBOOT__
+static DEFINE_MUTEX(mtd_partitions_mutex);
+#else
+DEFINE_MUTEX(mtd_partitions_mutex);
+#endif
 
 /* Our partition node structure */
 struct mtd_part {
 	struct mtd_info mtd;
 	struct mtd_info *master;
 	uint64_t offset;
-	int index;
 	struct list_head list;
-	int registered;
 };
 
 /*
@@ -39,6 +54,30 @@
 #define PART(x)  ((struct mtd_part *)(x))
 
 
+#ifdef __UBOOT__
+/* from mm/util.c */
+
+/**
+ * kstrdup - allocate space for and copy an existing string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ */
+char *kstrdup(const char *s, gfp_t gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strlen(s) + 1;
+	buf = kmalloc(len, gfp);
+	if (buf)
+		memcpy(buf, s, len);
+	return buf;
+}
+#endif
+
 /*
  * MTD methods which simply translate the effective address and pass through
  * to the _real_ device.
@@ -52,7 +91,8 @@
 	int res;
 
 	stats = part->master->ecc_stats;
-	res = mtd_read(part->master, from + part->offset, len, retlen, buf);
+	res = part->master->_read(part->master, from + part->offset, len,
+				  retlen, buf);
 	if (unlikely(mtd_is_eccerr(res)))
 		mtd->ecc_stats.failed +=
 			part->master->ecc_stats.failed - stats.failed;
@@ -62,6 +102,36 @@
 	return res;
 }
 
+#ifndef __UBOOT__
+static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
+		size_t *retlen, void **virt, resource_size_t *phys)
+{
+	struct mtd_part *part = PART(mtd);
+
+	return part->master->_point(part->master, from + part->offset, len,
+				    retlen, virt, phys);
+}
+
+static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	struct mtd_part *part = PART(mtd);
+
+	return part->master->_unpoint(part->master, from + part->offset, len);
+}
+#endif
+
+static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
+					    unsigned long len,
+					    unsigned long offset,
+					    unsigned long flags)
+{
+	struct mtd_part *part = PART(mtd);
+
+	offset += part->offset;
+	return part->master->_get_unmapped_area(part->master, len, offset,
+						flags);
+}
+
 static int part_read_oob(struct mtd_info *mtd, loff_t from,
 		struct mtd_oob_ops *ops)
 {
@@ -72,8 +142,25 @@
 		return -EINVAL;
 	if (ops->datbuf && from + ops->len > mtd->size)
 		return -EINVAL;
-	res = mtd_read_oob(part->master, from + part->offset, ops);
 
+	/*
+	 * If OOB is also requested, make sure that we do not read past the end
+	 * of this partition.
+	 */
+	if (ops->oobbuf) {
+		size_t len, pages;
+
+		if (ops->mode == MTD_OPS_AUTO_OOB)
+			len = mtd->oobavail;
+		else
+			len = mtd->oobsize;
+		pages = mtd_div_by_ws(mtd->size, mtd);
+		pages -= mtd_div_by_ws(from, mtd);
+		if (ops->ooboffs + ops->ooblen > pages * len)
+			return -EINVAL;
+	}
+
+	res = part->master->_read_oob(part->master, from + part->offset, ops);
 	if (unlikely(res)) {
 		if (mtd_is_bitflip(res))
 			mtd->ecc_stats.corrected++;
@@ -87,35 +174,46 @@
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_read_user_prot_reg(part->master, from, len,
+						 retlen, buf);
 }
 
 static int part_get_user_prot_info(struct mtd_info *mtd,
 		struct otp_info *buf, size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_get_user_prot_info(part->master, buf, len);
+	return part->master->_get_user_prot_info(part->master, buf, len);
 }
 
 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_read_fact_prot_reg(part->master, from, len,
+						 retlen, buf);
 }
 
 static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
 		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_get_fact_prot_info(part->master, buf, len);
+	return part->master->_get_fact_prot_info(part->master, buf, len);
 }
 
 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
 		size_t *retlen, const u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_write(part->master, to + part->offset, len, retlen, buf);
+	return part->master->_write(part->master, to + part->offset, len,
+				    retlen, buf);
+}
+
+static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+		size_t *retlen, const u_char *buf)
+{
+	struct mtd_part *part = PART(mtd);
+	return part->master->_panic_write(part->master, to + part->offset, len,
+					  retlen, buf);
 }
 
 static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -127,30 +225,41 @@
 		return -EINVAL;
 	if (ops->datbuf && to + ops->len > mtd->size)
 		return -EINVAL;
-	return mtd_write_oob(part->master, to + part->offset, ops);
+	return part->master->_write_oob(part->master, to + part->offset, ops);
 }
 
 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_write_user_prot_reg(part->master, from, len,
+						  retlen, buf);
 }
 
 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_lock_user_prot_reg(part->master, from, len);
+	return part->master->_lock_user_prot_reg(part->master, from, len);
 }
 
+#ifndef __UBOOT__
+static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
+		unsigned long count, loff_t to, size_t *retlen)
+{
+	struct mtd_part *part = PART(mtd);
+	return part->master->_writev(part->master, vecs, count,
+				     to + part->offset, retlen);
+}
+#endif
+
 static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	struct mtd_part *part = PART(mtd);
 	int ret;
 
 	instr->addr += part->offset;
-	ret = mtd_erase(part->master, instr);
+	ret = part->master->_erase(part->master, instr);
 	if (ret) {
 		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
@@ -171,30 +280,51 @@
 	if (instr->callback)
 		instr->callback(instr);
 }
+EXPORT_SYMBOL_GPL(mtd_erase_callback);
 
 static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_lock(part->master, ofs + part->offset, len);
+	return part->master->_lock(part->master, ofs + part->offset, len);
 }
 
 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_unlock(part->master, ofs + part->offset, len);
+	return part->master->_unlock(part->master, ofs + part->offset, len);
+}
+
+static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct mtd_part *part = PART(mtd);
+	return part->master->_is_locked(part->master, ofs + part->offset, len);
 }
 
 static void part_sync(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
-	mtd_sync(part->master);
+	part->master->_sync(part->master);
 }
 
+#ifndef __UBOOT__
+static int part_suspend(struct mtd_info *mtd)
+{
+	struct mtd_part *part = PART(mtd);
+	return part->master->_suspend(part->master);
+}
+
+static void part_resume(struct mtd_info *mtd)
+{
+	struct mtd_part *part = PART(mtd);
+	part->master->_resume(part->master);
+}
+#endif
+
 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct mtd_part *part = PART(mtd);
 	ofs += part->offset;
-	return mtd_block_isbad(part->master, ofs);
+	return part->master->_block_isbad(part->master, ofs);
 }
 
 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -203,12 +333,18 @@
 	int res;
 
 	ofs += part->offset;
-	res = mtd_block_markbad(part->master, ofs);
+	res = part->master->_block_markbad(part->master, ofs);
 	if (!res)
 		mtd->ecc_stats.badblocks++;
 	return res;
 }
 
+static inline void free_partition(struct mtd_part *p)
+{
+	kfree(p->mtd.name);
+	kfree(p);
+}
+
 /*
  * This function unregisters and destroy all slave MTD objects which are
  * attached to the given master MTD object.
@@ -217,49 +353,78 @@
 int del_mtd_partitions(struct mtd_info *master)
 {
 	struct mtd_part *slave, *next;
+	int ret, err = 0;
 
+	mutex_lock(&mtd_partitions_mutex);
 	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
 		if (slave->master == master) {
+			ret = del_mtd_device(&slave->mtd);
+			if (ret < 0) {
+				err = ret;
+				continue;
+			}
 			list_del(&slave->list);
-			if (slave->registered)
-				del_mtd_device(&slave->mtd);
-			kfree(slave);
+			free_partition(slave);
 		}
+	mutex_unlock(&mtd_partitions_mutex);
 
-	return 0;
+	return err;
 }
 
-static struct mtd_part *add_one_partition(struct mtd_info *master,
-		const struct mtd_partition *part, int partno,
-		uint64_t cur_offset)
+static struct mtd_part *allocate_partition(struct mtd_info *master,
+			const struct mtd_partition *part, int partno,
+			uint64_t cur_offset)
 {
 	struct mtd_part *slave;
+	char *name;
 
 	/* allocate the partition structure */
 	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
-	if (!slave) {
+	name = kstrdup(part->name, GFP_KERNEL);
+	if (!name || !slave) {
 		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
-			master->name);
-		del_mtd_partitions(master);
-		return NULL;
+		       master->name);
+		kfree(name);
+		kfree(slave);
+		return ERR_PTR(-ENOMEM);
 	}
-	list_add(&slave->list, &mtd_partitions);
 
 	/* set up the MTD object for this partition */
 	slave->mtd.type = master->type;
 	slave->mtd.flags = master->flags & ~part->mask_flags;
 	slave->mtd.size = part->size;
 	slave->mtd.writesize = master->writesize;
+	slave->mtd.writebufsize = master->writebufsize;
 	slave->mtd.oobsize = master->oobsize;
 	slave->mtd.oobavail = master->oobavail;
 	slave->mtd.subpage_sft = master->subpage_sft;
 
-	slave->mtd.name = part->name;
+	slave->mtd.name = name;
 	slave->mtd.owner = master->owner;
+#ifndef __UBOOT__
+	slave->mtd.backing_dev_info = master->backing_dev_info;
+
+	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
+	 * to have the same data be in two different partitions.
+	 */
+	slave->mtd.dev.parent = master->dev.parent;
+#endif
 
 	slave->mtd._read = part_read;
 	slave->mtd._write = part_write;
 
+	if (master->_panic_write)
+		slave->mtd._panic_write = part_panic_write;
+
+#ifndef __UBOOT__
+	if (master->_point && master->_unpoint) {
+		slave->mtd._point = part_point;
+		slave->mtd._unpoint = part_unpoint;
+	}
+#endif
+
+	if (master->_get_unmapped_area)
+		slave->mtd._get_unmapped_area = part_get_unmapped_area;
 	if (master->_read_oob)
 		slave->mtd._read_oob = part_read_oob;
 	if (master->_write_oob)
@@ -278,10 +443,21 @@
 		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
 	if (master->_sync)
 		slave->mtd._sync = part_sync;
+#ifndef __UBOOT__
+	if (!partno && !master->dev.class && master->_suspend &&
+	    master->_resume) {
+			slave->mtd._suspend = part_suspend;
+			slave->mtd._resume = part_resume;
+	}
+	if (master->_writev)
+		slave->mtd._writev = part_writev;
+#endif
 	if (master->_lock)
 		slave->mtd._lock = part_lock;
 	if (master->_unlock)
 		slave->mtd._unlock = part_unlock;
+	if (master->_is_locked)
+		slave->mtd._is_locked = part_is_locked;
 	if (master->_block_isbad)
 		slave->mtd._block_isbad = part_block_isbad;
 	if (master->_block_markbad)
@@ -289,7 +465,6 @@
 	slave->mtd._erase = part_erase;
 	slave->master = master;
 	slave->offset = part->offset;
-	slave->index = partno;
 
 	if (slave->offset == MTDPART_OFS_APPEND)
 		slave->offset = cur_offset;
@@ -298,18 +473,29 @@
 		if (mtd_mod_by_eb(cur_offset, master) != 0) {
 			/* Round up to next erasesize */
 			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
-			debug("Moving partition %d: 0x%012llx -> 0x%012llx\n",
-			      partno, (unsigned long long)cur_offset,
-			      (unsigned long long)slave->offset);
+			debug("Moving partition %d: "
+			       "0x%012llx -> 0x%012llx\n", partno,
+			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+		}
+	}
+	if (slave->offset == MTDPART_OFS_RETAIN) {
+		slave->offset = cur_offset;
+		if (master->size - slave->offset >= slave->mtd.size) {
+			slave->mtd.size = master->size - slave->offset
+							- slave->mtd.size;
+		} else {
+			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
+				part->name, master->size - slave->offset,
+				slave->mtd.size);
+			/* register to preserve ordering */
+			goto out_register;
 		}
 	}
 	if (slave->mtd.size == MTDPART_SIZ_FULL)
 		slave->mtd.size = master->size - slave->offset;
 
-	debug("0x%012llx-0x%012llx : \"%s\"\n",
-	      (unsigned long long)slave->offset,
-	      (unsigned long long)(slave->offset + slave->mtd.size),
-	      slave->mtd.name);
+	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
 
 	/* let's do some sanity checks */
 	if (slave->offset >= master->size) {
@@ -336,7 +522,8 @@
 		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
 			;
 		/* The loop searched for the region _behind_ the first one */
-		i--;
+		if (i > 0)
+			i--;
 
 		/* Pick biggest erasesize */
 		for (; i < max && regions[i].offset < end; i++) {
@@ -367,6 +554,10 @@
 	}
 
 	slave->mtd.ecclayout = master->ecclayout;
+	slave->mtd.ecc_step_size = master->ecc_step_size;
+	slave->mtd.ecc_strength = master->ecc_strength;
+	slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
 	if (master->_block_isbad) {
 		uint64_t offs = 0;
 
@@ -378,18 +569,89 @@
 	}
 
 out_register:
-	if (part->mtdp) {
-		/* store the object pointer (caller may or may not register it*/
-		*part->mtdp = &slave->mtd;
-		slave->registered = 0;
-	} else {
-		/* register our partition */
-		add_mtd_device(&slave->mtd);
-		slave->registered = 1;
-	}
 	return slave;
 }
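
A note on the new MTDPART_OFS_RETAIN handling in allocate_partition(): for such
entries, part->size is not the partition size but the number of bytes to leave
free *behind* the partition, which starts at cur_offset and grows to fill the
rest of the device. A minimal sketch, assuming a hypothetical 256 MiB chip and
made-up partition names:

    /* Sketch only: names and sizes are hypothetical. */
    static const struct mtd_partition example_parts[] = {
        {
            .name   = "kernel",
            .offset = 0,
            .size   = 4 * 1024 * 1024,
        },
        {
            .name   = "filesystem",
            .offset = MTDPART_OFS_RETAIN,
            .size   = 1 * 1024 * 1024,  /* bytes to keep free, not size */
        },
    };
    /*
     * allocate_partition() resolves this to:
     *   filesystem.offset = 4 MiB
     *   filesystem.size   = 256 MiB - 4 MiB - 1 MiB = 251 MiB
     * leaving exactly 1 MiB unassigned at the end of the chip.
     */
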
 
+int mtd_add_partition(struct mtd_info *master, const char *name,
+		      long long offset, long long length)
+{
+	struct mtd_partition part;
+	struct mtd_part *p, *new;
+	uint64_t start, end;
+	int ret = 0;
+
+	/* the direct offset is expected */
+	if (offset == MTDPART_OFS_APPEND ||
+	    offset == MTDPART_OFS_NXTBLK)
+		return -EINVAL;
+
+	if (length == MTDPART_SIZ_FULL)
+		length = master->size - offset;
+
+	if (length <= 0)
+		return -EINVAL;
+
+	part.name = name;
+	part.size = length;
+	part.offset = offset;
+	part.mask_flags = 0;
+	part.ecclayout = NULL;
+
+	new = allocate_partition(master, &part, -1, offset);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+
+	start = offset;
+	end = offset + length;
+
+	mutex_lock(&mtd_partitions_mutex);
+	list_for_each_entry(p, &mtd_partitions, list)
+		if (p->master == master) {
+			if ((start >= p->offset) &&
+			    (start < (p->offset + p->mtd.size)))
+				goto err_inv;
+
+			if ((end >= p->offset) &&
+			    (end < (p->offset + p->mtd.size)))
+				goto err_inv;
+		}
+
+	list_add(&new->list, &mtd_partitions);
+	mutex_unlock(&mtd_partitions_mutex);
+
+	add_mtd_device(&new->mtd);
+
+	return ret;
+err_inv:
+	mutex_unlock(&mtd_partitions_mutex);
+	free_partition(new);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mtd_add_partition);
+
+int mtd_del_partition(struct mtd_info *master, int partno)
+{
+	struct mtd_part *slave, *next;
+	int ret = -EINVAL;
+
+	mutex_lock(&mtd_partitions_mutex);
+	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+		if ((slave->master == master) &&
+		    (slave->mtd.index == partno)) {
+			ret = del_mtd_device(&slave->mtd);
+			if (ret < 0)
+				break;
+
+			list_del(&slave->list);
+			free_partition(slave);
+			break;
+		}
+	mutex_unlock(&mtd_partitions_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_del_partition);
+
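
mtd_add_partition() and mtd_del_partition() are new runtime entry points: add
rejects the MTDPART_OFS_APPEND/MTDPART_OFS_NXTBLK pseudo-offsets and any range
that overlaps a sibling partition, del looks a slave up by its mtd.index. A
hedged usage sketch (the partition name, offsets and partno value are all
hypothetical):

    /* Sketch: carve out a temporary 1 MiB partition at 16 MiB, then drop it. */
    static int example_scratch(struct mtd_info *master)
    {
        int ret;

        ret = mtd_add_partition(master, "scratch",
                                16 * 1024 * 1024, 1 * 1024 * 1024);
        if (ret)
            return ret;     /* -EINVAL on overlap, -ENOMEM, ... */

        /* ... use the new device, e.g. via get_mtd_device_nm() ... */

        return mtd_del_partition(master, 1 /* hypothetical mtd.index */);
    }
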
 /*
  * This function, given a master MTD object and a partition table, creates
  * and registers slave MTD objects which are bound to the master according to
@@ -407,6 +669,7 @@
 	uint64_t cur_offset = 0;
 	int i;
 
+#ifdef __UBOOT__
 	/*
 	 * Need to init the list here, since LIST_INIT() does not
 	 * work on platforms where relocation has problems (like MIPS
@@ -414,15 +677,147 @@
 	 */
 	if (mtd_partitions.next == NULL)
 		INIT_LIST_HEAD(&mtd_partitions);
+#endif
 
 	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
 
 	for (i = 0; i < nbparts; i++) {
-		slave = add_one_partition(master, parts + i, i, cur_offset);
-		if (!slave)
-			return -ENOMEM;
+		slave = allocate_partition(master, parts + i, i, cur_offset);
+		if (IS_ERR(slave))
+			return PTR_ERR(slave);
+
+		mutex_lock(&mtd_partitions_mutex);
+		list_add(&slave->list, &mtd_partitions);
+		mutex_unlock(&mtd_partitions_mutex);
+
+		add_mtd_device(&slave->mtd);
+
 		cur_offset = slave->offset + slave->mtd.size;
 	}
 
 	return 0;
 }
+
+#ifndef __UBOOT__
+static DEFINE_SPINLOCK(part_parser_lock);
+static LIST_HEAD(part_parsers);
+
+static struct mtd_part_parser *get_partition_parser(const char *name)
+{
+	struct mtd_part_parser *p, *ret = NULL;
+
+	spin_lock(&part_parser_lock);
+
+	list_for_each_entry(p, &part_parsers, list)
+		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
+			ret = p;
+			break;
+		}
+
+	spin_unlock(&part_parser_lock);
+
+	return ret;
+}
+
+#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+
+void register_mtd_parser(struct mtd_part_parser *p)
+{
+	spin_lock(&part_parser_lock);
+	list_add(&p->list, &part_parsers);
+	spin_unlock(&part_parser_lock);
+}
+EXPORT_SYMBOL_GPL(register_mtd_parser);
+
+void deregister_mtd_parser(struct mtd_part_parser *p)
+{
+	spin_lock(&part_parser_lock);
+	list_del(&p->list);
+	spin_unlock(&part_parser_lock);
+}
+EXPORT_SYMBOL_GPL(deregister_mtd_parser);
+
+/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+static const char * const default_mtd_part_types[] = {
+	"cmdlinepart",
+	"ofpart",
+	NULL
+};
+
+/**
+ * parse_mtd_partitions - parse MTD partitions
+ * @master: the master partition (describes whole MTD device)
+ * @types: names of partition parsers to try or %NULL
+ * @pparts: array of partitions found is returned here
+ * @data: MTD partition parser-specific data
+ *
+ * This function tries to find partitions on MTD device @master. It uses MTD
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: If there is more than one parser in @types, the kernel only takes the
+ * partitions parsed out by the first parser.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+ * o zero if no partitions were found
+ * o a positive number of found partitions, in which case on exit @pparts will
+ *   point to an array containing this number of &struct mtd_partition objects.
+ */
+int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
+			 struct mtd_partition **pparts,
+			 struct mtd_part_parser_data *data)
+{
+	struct mtd_part_parser *parser;
+	int ret = 0;
+
+	if (!types)
+		types = default_mtd_part_types;
+
+	for ( ; ret <= 0 && *types; types++) {
+		parser = get_partition_parser(*types);
+		if (!parser && !request_module("%s", *types))
+			parser = get_partition_parser(*types);
+		if (!parser)
+			continue;
+		ret = (*parser->parse_fn)(master, pparts, data);
+		put_partition_parser(parser);
+		if (ret > 0) {
+			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
+			       ret, parser->name, master->name);
+			break;
+		}
+	}
+	return ret;
+}
+#endif
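
The kerneldoc above pins down the parser contract: parse_fn returns a negative
errno on hard failure, 0 when it found nothing, or the number of partitions it
allocated into *pparts. A minimal sketch of a conforming parser (U-Boot
compiles this framework out via __UBOOT__; the parser below is purely
illustrative, with a hypothetical name):

    /* Sketch: a trivial parser that reports one whole-chip partition. */
    static int example_parse_fn(struct mtd_info *master,
                                struct mtd_partition **pparts,
                                struct mtd_part_parser_data *data)
    {
        struct mtd_partition *parts = kzalloc(sizeof(*parts), GFP_KERNEL);

        if (!parts)
            return -ENOMEM;             /* negative: hard failure */

        parts[0].name   = "whole-chip"; /* hypothetical name */
        parts[0].offset = 0;
        parts[0].size   = MTDPART_SIZ_FULL;

        *pparts = parts;
        return 1;                       /* positive: one partition found */
    }

    static struct mtd_part_parser example_parser = {
        .owner    = THIS_MODULE,
        .parse_fn = example_parse_fn,
        .name     = "example",
    };

Registering it with register_mtd_parser(&example_parser) makes it selectable
through the @types list.
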
+
+int mtd_is_partition(const struct mtd_info *mtd)
+{
+	struct mtd_part *part;
+	int ispart = 0;
+
+	mutex_lock(&mtd_partitions_mutex);
+	list_for_each_entry(part, &mtd_partitions, list)
+		if (&part->mtd == mtd) {
+			ispart = 1;
+			break;
+		}
+	mutex_unlock(&mtd_partitions_mutex);
+
+	return ispart;
+}
+EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+	if (!mtd_is_partition(mtd))
+		return mtd->size;
+
+	return PART(mtd)->master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
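
mtd_is_partition() and mtd_get_device_size() let users such as UBI reason
about the whole chip even when attached to a partition, e.g. when sizing
reserves proportionally to the device. A one-line sketch (the 1% policy is
made up):

    /* Sketch: scale with the chip, not with the partition */
    uint64_t reserve = mtd_get_device_size(mtd) / 100;
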
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 2f31fc9..7e1e6ec 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -561,6 +561,7 @@
 		       len, avail);
 }
 
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 /*
  * Verify buffer against the FCM Controller Data Buffer
  */
@@ -593,6 +594,7 @@
 	ctrl->index += len;
 	return i == len && ctrl->status == LTESR_CC ? 0 : -EIO;
 }
+#endif
 
 /* This function is called after Program and Erase Operations to
  * check for success or failure.
@@ -725,7 +727,9 @@
 	nand->read_byte = fsl_elbc_read_byte;
 	nand->write_buf = fsl_elbc_write_buf;
 	nand->read_buf = fsl_elbc_read_buf;
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 	nand->verify_buf = fsl_elbc_verify_buf;
+#endif
 	nand->select_chip = fsl_elbc_select_chip;
 	nand->cmdfunc = fsl_elbc_cmdfunc;
 	nand->waitfunc = fsl_elbc_wait;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 8b453cb..2f04c69 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -684,6 +684,7 @@
 		       __func__, len, avail);
 }
 
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 /*
  * Verify buffer against the IFC Controller Data Buffer
  */
@@ -716,6 +717,7 @@
 	ctrl->index += len;
 	return i == len && ctrl->status == IFC_NAND_EVTER_STAT_OPC ? 0 : -EIO;
 }
+#endif
 
 /* This function is called after Program and Erase Operations to
  * check for success or failure.
@@ -939,7 +941,9 @@
 
 	nand->write_buf = fsl_ifc_write_buf;
 	nand->read_buf = fsl_ifc_read_buf;
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 	nand->verify_buf = fsl_ifc_verify_buf;
+#endif
 	nand->select_chip = fsl_ifc_select_chip;
 	nand->cmdfunc = fsl_ifc_cmdfunc;
 	nand->waitfunc = fsl_ifc_wait;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 3ae0044..65ce98a 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -153,6 +153,7 @@
 		buf[i] = in_8(chip->IO_ADDR_R);
 }
 
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 static int upm_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
 {
 	int i;
@@ -165,6 +166,7 @@
 
 	return 0;
 }
+#endif
 
 static int nand_dev_ready(struct mtd_info *mtd)
 {
@@ -191,7 +193,9 @@
 	chip->read_byte = upm_nand_read_byte;
 	chip->read_buf = upm_nand_read_buf;
 	chip->write_buf = upm_nand_write_buf;
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 	chip->verify_buf = upm_nand_verify_buf;
+#endif
 	if (fun->dev_ready)
 		chip->dev_ready = nand_dev_ready;
 
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index d0f3a35..7233bfc 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -459,6 +459,7 @@
 	mpc5121_nfc_buf_copy(mtd, (u_char *) buf, len, 1);
 }
 
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 /* Compare buffer with NAND flash */
 static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
 				  const u_char * buf, int len)
@@ -479,6 +480,7 @@
 
 	return 0;
 }
+#endif
 
 /* Read byte from NFC buffers */
 static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
@@ -607,7 +609,9 @@
 	chip->read_word = mpc5121_nfc_read_word;
 	chip->read_buf = mpc5121_nfc_read_buf;
 	chip->write_buf = mpc5121_nfc_write_buf;
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 	chip->verify_buf = mpc5121_nfc_verify_buf;
+#endif
 	chip->select_chip = mpc5121_nfc_select_chip;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index ed0ca3a..2e5b5b9 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -949,6 +949,8 @@
 	host->col_addr = col;
 }
 
+#ifdef __UBOOT__
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 /*
  * Used by the upper layer to verify the data in NAND Flash
  * with the data in the buf.
@@ -972,6 +974,8 @@
 
 	return 0;
 }
+#endif
+#endif
 
 /*
  * This function is used by upper layer for select and
@@ -1203,7 +1207,11 @@
 	this->read_word = mxc_nand_read_word;
 	this->write_buf = mxc_nand_write_buf;
 	this->read_buf = mxc_nand_read_buf;
+#ifdef __UBOOT__
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 	this->verify_buf = mxc_nand_verify_buf;
+#endif
+#endif
 
 	host->regs = (struct mxc_nand_regs __iomem *)CONFIG_MXC_NAND_REGS_BASE;
 #ifdef MXC_NFC_V3_2
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 376976d..ae61cca 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4,7 +4,6 @@
  *  Overview:
  *   This is the generic MTD driver for NAND flash devices. It should be
  *   capable of working with almost all NAND chips currently available.
- *   Basic support for AG-AND chips is provided.
  *
  *	Additional technical information is available on
  *	http://www.linux-mtd.infradead.org/doc/nand.html
@@ -22,8 +21,6 @@
  *	Enable cached programming for 2k page size chips
  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
  *	if we have HW ECC support.
- *	The AG-AND chips have nice features for speed improvement,
- *	which are not supported yet. Read / program 4 pages in one go.
  *	BBT table is not serialized, has to be fixed
  *
  * This program is free software; you can redistribute it and/or modify
@@ -32,10 +29,29 @@
  *
  */
 
+#define __UBOOT__
+#ifndef __UBOOT__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/leds.h>
+#include <linux/io.h>
+#include <linux/mtd/partitions.h>
+#else
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <common.h>
-
-#define ENOTSUPP	524	/* Operation is not supported */
-
 #include <malloc.h>
 #include <watchdog.h>
 #include <linux/err.h>
@@ -44,11 +60,9 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/nand_bch.h>
-
 #ifdef CONFIG_MTD_PARTITIONS
 #include <linux/mtd/partitions.h>
 #endif
-
 #include <asm/io.h>
 #include <asm/errno.h>
 
@@ -63,6 +77,9 @@
 #define CONFIG_SYS_NAND_RESET_CNT 200000
 #endif
 
+static bool is_module_text_address(unsigned long addr) {return 0;}
+#endif
+
 /* Define default oob placement schemes for large and small page devices */
 static struct nand_ecclayout nand_oob_8 = {
 	.eccbytes = 3,
@@ -107,13 +124,16 @@
 		 .length = 78} }
 };
 
-static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
-			   int new_state);
+static int nand_get_device(struct mtd_info *mtd, int new_state);
 
 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
 			     struct mtd_oob_ops *ops);
 
-static int nand_wait(struct mtd_info *mtd, struct nand_chip *this);
+/*
+ * For devices which display every fart in the system on a separate LED. Is
+ * compiled away when LED support is disabled.
+ */
+DEFINE_LED_TRIGGER(nand_led_trigger);
 
 static int check_offs_len(struct mtd_info *mtd,
 					loff_t ofs, uint64_t len)
@@ -122,15 +142,14 @@
 	int ret = 0;
 
 	/* Start address must align on block boundary */
-	if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
+		pr_debug("%s: unaligned address\n", __func__);
 		ret = -EINVAL;
 	}
 
 	/* Length must align on block boundary */
-	if (len & ((1 << chip->phys_erase_shift) - 1)) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
-					__func__);
+	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
+		pr_debug("%s: length not block aligned\n", __func__);
 		ret = -EINVAL;
 	}
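
The 1 -> 1ULL change above is not cosmetic: ofs and len are 64-bit, and
building the alignment mask from a plain int would invoke undefined behaviour
once phys_erase_shift reaches the width of int. A tiny illustration, with a
deliberately absurd shift value:

    /* Illustration only: the shift value is hypothetical. */
    int shift = 31;                          /* would mean a 2 GiB erase block */
    long long m32 = (1    << shift) - 1;     /* 1 << 31 overflows int: undefined */
    long long m64 = (1ULL << shift) - 1;     /* well-defined 64-bit mask */

For realistic block sizes both forms happen to produce the same mask, so this
is strictly a robustness fix.
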
 
@@ -141,30 +160,43 @@
  * nand_release_device - [GENERIC] release chip
  * @mtd: MTD device structure
  *
- * Deselect, release chip lock and wake up anyone waiting on the device.
+ * Release chip lock and wake up anyone waiting on the device.
  */
 static void nand_release_device(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd->priv;
 
+#ifndef __UBOOT__
+	/* Release the controller and the chip */
+	spin_lock(&chip->controller->lock);
+	chip->controller->active = NULL;
+	chip->state = FL_READY;
+	wake_up(&chip->controller->wq);
+	spin_unlock(&chip->controller->lock);
+#else
 	/* De-select the NAND device */
 	chip->select_chip(mtd, -1);
+#endif
 }
 
 /**
  * nand_read_byte - [DEFAULT] read one byte from the chip
  * @mtd: MTD device structure
  *
- * Default read function for 8bit buswidth.
+ * Default read function for 8bit buswidth
  */
+#ifndef __UBOOT__
+static uint8_t nand_read_byte(struct mtd_info *mtd)
+#else
 uint8_t nand_read_byte(struct mtd_info *mtd)
+#endif
 {
 	struct nand_chip *chip = mtd->priv;
 	return readb(chip->IO_ADDR_R);
 }
 
 /**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
  * @mtd: MTD device structure
  *
@@ -213,6 +245,88 @@
 }
 
 /**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	chip->write_buf(mtd, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
+{
+	struct nand_chip *chip = mtd->priv;
+	uint16_t word = byte;
+
+	/*
+	 * It's not entirely clear what should happen to I/O[15:8] when writing
+	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+	 *
+	 *    When the host supports a 16-bit bus width, only data is
+	 *    transferred at the 16-bit width. All address and command line
+	 *    transfers shall use only the lower 8-bits of the data bus. During
+	 *    command transfers, the host may place any value on the upper
+	 *    8-bits of the data bus. During address transfers, the host shall
+	 *    set the upper 8-bits of the data bus to 00h.
+	 *
+	 * One user of the write_byte callback is nand_onfi_set_features. The
+	 * four parameters are specified to be written to I/O[7:0], but this is
+	 * neither an address nor a command transfer. Let's assume a 0 on the
+	 * upper I/O lines is OK.
+	 */
+	chip->write_buf(mtd, (uint8_t *)&word, 2);
+}
+
+#if defined(__UBOOT__) && !defined(CONFIG_BLACKFIN)
+static void iowrite8_rep(void *addr, const uint8_t *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		writeb(buf[i], addr);
+}
+static void ioread8_rep(void *addr, uint8_t *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		buf[i] = readb(addr);
+}
+
+static void ioread16_rep(void *addr, void *buf, int len)
+{
+	int i;
+	u16 *p = (u16 *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++)
+		p[i] = readw(addr);
+}
+
+static void iowrite16_rep(void *addr, void *buf, int len)
+{
+	int i;
+	u16 *p = (u16 *) buf;
+	len >>= 1;
+
+        for (i = 0; i < len; i++)
+                writew(p[i], addr);
+}
+#endif
+
+/**
  * nand_write_buf - [DEFAULT] write buffer to chip
  * @mtd: MTD device structure
  * @buf: data buffer
@@ -220,13 +334,15 @@
  *
  * Default write function for 8bit buswidth.
  */
+#ifndef __UBOOT__
+static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+#else
 void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+#endif
 {
-	int i;
 	struct nand_chip *chip = mtd->priv;
 
-	for (i = 0; i < len; i++)
-		writeb(buf[i], chip->IO_ADDR_W);
+	iowrite8_rep(chip->IO_ADDR_W, buf, len);
 }
 
 /**
@@ -237,15 +353,19 @@
  *
  * Default read function for 8bit buswidth.
  */
+#ifndef __UBOOT__
+static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+#else
 void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+#endif
 {
-	int i;
 	struct nand_chip *chip = mtd->priv;
 
-	for (i = 0; i < len; i++)
-		buf[i] = readb(chip->IO_ADDR_R);
+	ioread8_rep(chip->IO_ADDR_R, buf, len);
 }
 
+#ifdef __UBOOT__
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 /**
  * nand_verify_buf - [DEFAULT] Verify chip data against buffer
  * @mtd: MTD device structure
@@ -266,45 +386,6 @@
 }
 
 /**
- * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 16bit buswidth.
- */
-void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *chip = mtd->priv;
-	u16 *p = (u16 *) buf;
-	len >>= 1;
-
-	for (i = 0; i < len; i++)
-		writew(p[i], chip->IO_ADDR_W);
-
-}
-
-/**
- * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- *
- * Default read function for 16bit buswidth.
- */
-void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *chip = mtd->priv;
-	u16 *p = (u16 *) buf;
-	len >>= 1;
-
-	for (i = 0; i < len; i++)
-		p[i] = readw(chip->IO_ADDR_R);
-}
-
-/**
  * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
  * @mtd: MTD device structure
  * @buf: buffer containing the data to compare
@@ -325,6 +406,48 @@
 
 	return 0;
 }
+#endif
+#endif
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+#ifndef __UBOOT__
+static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+#else
+void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+#endif
+{
+	struct nand_chip *chip = mtd->priv;
+	u16 *p = (u16 *) buf;
+
+	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+#ifndef __UBOOT__
+static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+#else
+void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+#endif
+{
+	struct nand_chip *chip = mtd->priv;
+	u16 *p = (u16 *) buf;
+
+	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
+}
 
 /**
  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
@@ -348,7 +471,7 @@
 	if (getchip) {
 		chipnr = (int)(ofs >> chip->chip_shift);
 
-		nand_get_device(chip, mtd, FL_READING);
+		nand_get_device(mtd, FL_READING);
 
 		/* Select the NAND device */
 		chip->select_chip(mtd, chipnr);
@@ -378,87 +501,97 @@
 		i++;
 	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
 
-	if (getchip)
+	if (getchip) {
+		chip->select_chip(mtd, -1);
 		nand_release_device(mtd);
+	}
 
 	return res;
 }
 
 /**
- * nand_default_block_markbad - [DEFAULT] mark a block bad
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
  * @mtd: MTD device structure
  * @ofs: offset from device start
  *
  * This is the default implementation, which can be overridden by a hardware
- * specific driver. We try operations in the following order, according to our
- * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
- *  (1) erase the affected block, to allow OOB marker to be written cleanly
- *  (2) update in-memory BBT
- *  (3) write bad block marker to OOB area of affected block
- *  (4) update flash-based BBT
- * Note that we retain the first error encountered in (3) or (4), finish the
- * procedures, and dump the error in the end.
-*/
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct nand_chip *chip = mtd->priv;
+	struct mtd_oob_ops ops;
 	uint8_t buf[2] = { 0, 0 };
-	int block, res, ret = 0, i = 0;
-	int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
+	int ret = 0, res, i = 0;
 
-	if (write_oob) {
+	ops.datbuf = NULL;
+	ops.oobbuf = buf;
+	ops.ooboffs = chip->badblockpos;
+	if (chip->options & NAND_BUSWIDTH_16) {
+		ops.ooboffs &= ~0x01;
+		ops.len = ops.ooblen = 2;
+	} else {
+		ops.len = ops.ooblen = 1;
+	}
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	/* Write to first/last page(s) if necessary */
+	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+		ofs += mtd->erasesize - mtd->writesize;
+	do {
+		res = nand_do_write_oob(mtd, ofs, &ops);
+		if (!ret)
+			ret = res;
+
+		i++;
+		ofs += mtd->writesize;
+	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+	return ret;
+}
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
+ *  (1) erase the affected block, to allow OOB marker to be written cleanly
+ *  (2) write bad block marker to OOB area of affected block (unless flag
+ *      NAND_BBT_NO_OOB_BBM is present)
+ *  (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * procedures, and dump the error in the end.
+*/
+static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd->priv;
+	int res, ret = 0;
+
+	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
 		struct erase_info einfo;
 
 		/* Attempt erase before marking OOB */
 		memset(&einfo, 0, sizeof(einfo));
 		einfo.mtd = mtd;
 		einfo.addr = ofs;
-		einfo.len = 1 << chip->phys_erase_shift;
+		einfo.len = 1ULL << chip->phys_erase_shift;
 		nand_erase_nand(mtd, &einfo, 0);
-	}
 
-	/* Get block number */
-	block = (int)(ofs >> chip->bbt_erase_shift);
-	/* Mark block bad in memory-based BBT */
-	if (chip->bbt)
-		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
-
-	/* Write bad block marker to OOB */
-	if (write_oob) {
-		struct mtd_oob_ops ops;
-		loff_t wr_ofs = ofs;
-
-		nand_get_device(chip, mtd, FL_WRITING);
-
-		ops.datbuf = NULL;
-		ops.oobbuf = buf;
-		ops.ooboffs = chip->badblockpos;
-		if (chip->options & NAND_BUSWIDTH_16) {
-			ops.ooboffs &= ~0x01;
-			ops.len = ops.ooblen = 2;
-		} else {
-			ops.len = ops.ooblen = 1;
-		}
-		ops.mode = MTD_OPS_PLACE_OOB;
-
-		/* Write to first/last page(s) if necessary */
-		if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
-			wr_ofs += mtd->erasesize - mtd->writesize;
-		do {
-			res = nand_do_write_oob(mtd, wr_ofs, &ops);
-			if (!ret)
-				ret = res;
-
-			i++;
-			wr_ofs += mtd->writesize;
-		} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
-
+		/* Write bad block marker to OOB */
+		nand_get_device(mtd, FL_WRITING);
+		ret = chip->block_markbad(mtd, ofs);
 		nand_release_device(mtd);
 	}
 
-	/* Update flash-based bad block table */
-	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
-		res = nand_update_bbt(mtd, ofs);
+	/* Mark block bad in BBT */
+	if (chip->bbt) {
+		res = nand_markbad_bbt(mtd, ofs);
 		if (!ret)
 			ret = res;
 	}
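
Bad block marking is thus split in two: nand_block_markbad_lowlevel() keeps
the generic ordering (erase, OOB marker unless NAND_BBT_NO_OOB_BBM, then
nand_markbad_bbt()), while chip->block_markbad only writes the marker itself.
A driver with its own marker scheme now overrides just that hook; a hedged
sketch, where mydrv_write_marker() is a hypothetical controller-specific
helper:

    static int mydrv_block_markbad(struct mtd_info *mtd, loff_t ofs)
    {
        /* write the marker for the block at @ofs; BBT handling stays generic */
        return mydrv_write_marker(mtd, ofs);
    }

    /* in the driver's init path: */
    chip->block_markbad = mydrv_block_markbad;
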
@@ -504,11 +637,6 @@
 {
 	struct nand_chip *chip = mtd->priv;
 
-	if (!(chip->options & NAND_BBT_SCANNED)) {
-		chip->options |= NAND_BBT_SCANNED;
-		chip->scan_bbt(mtd);
-	}
-
 	if (!chip->bbt)
 		return chip->block_bad(mtd, ofs, getchip);
 
@@ -516,22 +644,63 @@
 	return nand_isbad_bbt(mtd, ofs, allowbbt);
 }
 
+#ifndef __UBOOT__
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready used when needing to wait in interrupt
+ * context.
+ */
+static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+	struct nand_chip *chip = mtd->priv;
+	int i;
+
+	/* Wait for the device to get ready */
+	for (i = 0; i < timeo; i++) {
+		if (chip->dev_ready(mtd))
+			break;
+		touch_softlockup_watchdog();
+		mdelay(1);
+	}
+}
+#endif
+
 /* Wait for the ready pin, after a command. The timeout is caught later. */
 void nand_wait_ready(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd->priv;
+#ifndef __UBOOT__
+	unsigned long timeo = jiffies + msecs_to_jiffies(20);
+
+	/* 400ms timeout */
+	if (in_interrupt() || oops_in_progress)
+		return panic_nand_wait_ready(mtd, 400);
+
+	led_trigger_event(nand_led_trigger, LED_FULL);
+	/* Wait until command is processed or timeout occurs */
+	do {
+		if (chip->dev_ready(mtd))
+			break;
+		touch_softlockup_watchdog();
+	} while (time_before(jiffies, timeo));
+	led_trigger_event(nand_led_trigger, LED_OFF);
+#else
 	u32 timeo = (CONFIG_SYS_HZ * 20) / 1000;
 	u32 time_start;
 
 	time_start = get_timer(0);
-
 	/* Wait until command is processed or timeout occurs */
 	while (get_timer(time_start) < timeo) {
 		if (chip->dev_ready)
 			if (chip->dev_ready(mtd))
 				break;
 	}
+#endif
 }
+EXPORT_SYMBOL_GPL(nand_wait_ready);
 
 /**
  * nand_command - [DEFAULT] Send command to NAND device
@@ -541,7 +710,7 @@
  * @page_addr: the page address for this command, -1 if none
  *
  * Send command to NAND device. This function is used for small page devices
- * (256/512 Bytes per page).
+ * (512 Bytes per page).
  */
 static void nand_command(struct mtd_info *mtd, unsigned int command,
 			 int column, int page_addr)
@@ -660,8 +829,7 @@
 	}
 
 	/* Command latch cycle */
-	chip->cmd_ctrl(mtd, command & 0xff,
-		       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 
 	if (column != -1 || page_addr != -1) {
 		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -701,16 +869,6 @@
 	case NAND_CMD_SEQIN:
 	case NAND_CMD_RNDIN:
 	case NAND_CMD_STATUS:
-	case NAND_CMD_DEPLETE1:
-		return;
-
-	case NAND_CMD_STATUS_ERROR:
-	case NAND_CMD_STATUS_ERROR0:
-	case NAND_CMD_STATUS_ERROR1:
-	case NAND_CMD_STATUS_ERROR2:
-	case NAND_CMD_STATUS_ERROR3:
-		/* Read error status commands require only a short delay */
-		udelay(chip->chip_delay);
 		return;
 
 	case NAND_CMD_RESET:
@@ -761,18 +919,91 @@
 }
 
 /**
- * nand_get_device - [GENERIC] Get chip for selected access
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
  * @chip: the nand chip descriptor
  * @mtd: MTD device structure
  * @new_state: the state which is requested
  *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip,
+		      struct mtd_info *mtd, int new_state)
+{
+	/* Hardware controller shared among independent devices */
+	chip->controller->active = chip;
+	chip->state = new_state;
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
  * Get the device and lock it for exclusive access
  */
 static int
-nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
+nand_get_device(struct mtd_info *mtd, int new_state)
 {
+	struct nand_chip *chip = mtd->priv;
+#ifndef __UBOOT__
+	spinlock_t *lock = &chip->controller->lock;
+	wait_queue_head_t *wq = &chip->controller->wq;
+	DECLARE_WAITQUEUE(wait, current);
+retry:
+	spin_lock(lock);
+
+	/* Hardware controller shared among independent devices */
+	if (!chip->controller->active)
+		chip->controller->active = chip;
+
+	if (chip->controller->active == chip && chip->state == FL_READY) {
+		chip->state = new_state;
+		spin_unlock(lock);
+		return 0;
+	}
+	if (new_state == FL_PM_SUSPENDED) {
+		if (chip->controller->active->state == FL_PM_SUSPENDED) {
+			chip->state = FL_PM_SUSPENDED;
+			spin_unlock(lock);
+			return 0;
+		}
+	}
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	add_wait_queue(wq, &wait);
+	spin_unlock(lock);
+	schedule();
+	remove_wait_queue(wq, &wait);
+	goto retry;
+#else
 	chip->state = new_state;
 	return 0;
+#endif
+}
+
+/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context. May happen when in panic and trying to write
+ * an oops through mtdoops.
+ */
+static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
+			    unsigned long timeo)
+{
+	int i;
+	for (i = 0; i < timeo; i++) {
+		if (chip->dev_ready) {
+			if (chip->dev_ready(mtd))
+				break;
+		} else {
+			if (chip->read_byte(mtd) & NAND_STATUS_READY)
+				break;
+		}
+		mdelay(1);
+	}
 }
 
 /**
@@ -786,28 +1017,42 @@
  */
 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
 {
-	unsigned long	timeo;
-	int state = chip->state;
-	u32 time_start;
 
-	if (state == FL_ERASING)
-		timeo = (CONFIG_SYS_HZ * 400) / 1000;
-	else
-		timeo = (CONFIG_SYS_HZ * 20) / 1000;
+	int status, state = chip->state;
+	unsigned long timeo = (state == FL_ERASING ? 400 : 20);
 
-	if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
-		chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
-	else
-		chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+	led_trigger_event(nand_led_trigger, LED_FULL);
 
-	time_start = get_timer(0);
+	/*
+	 * Apply this short delay always to ensure that we do wait tWB in any
+	 * case on any machine.
+	 */
+	ndelay(100);
 
-	while (1) {
-		if (get_timer(time_start) > timeo) {
-			printf("Timeout!");
-			return 0x01;
+	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+
+#ifndef __UBOOT__
+	if (in_interrupt() || oops_in_progress)
+		panic_nand_wait(mtd, chip, timeo);
+	else {
+		timeo = jiffies + msecs_to_jiffies(timeo);
+		while (time_before(jiffies, timeo)) {
+			if (chip->dev_ready) {
+				if (chip->dev_ready(mtd))
+					break;
+			} else {
+				if (chip->read_byte(mtd) & NAND_STATUS_READY)
+					break;
+			}
+			cond_resched();
 		}
-
+	}
+#else
+	u32 timer = (CONFIG_SYS_HZ * timeo) / 1000;
+	u32 time_start;
+
+	time_start = get_timer(0);
+	while (get_timer(time_start) < timer) {
 		if (chip->dev_ready) {
 			if (chip->dev_ready(mtd))
 				break;
@@ -816,15 +1061,176 @@
 				break;
 		}
 	}
+#endif
 #ifdef PPCHAMELON_NAND_TIMER_HACK
 	time_start = get_timer(0);
 	while (get_timer(time_start) < 10)
 		;
 #endif /*  PPCHAMELON_NAND_TIMER_HACK */
+	led_trigger_event(nand_led_trigger, LED_OFF);
 
-	return (int)chip->read_byte(mtd);
+	status = (int)chip->read_byte(mtd);
+	/* This can happen in case of timeout or buggy dev_ready */
+	WARN_ON(!(status & NAND_STATUS_READY));
+	return status;
 }
 
+#ifndef __UBOOT__
+/**
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ *                    upper boundary address
+ *          when = 1, unlock the range of blocks outside the boundaries
+ *                    of the lower and upper boundary address
+ *
+ * Returns unlock status.
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+					uint64_t len, int invert)
+{
+	int ret = 0;
+	int status, page;
+	struct nand_chip *chip = mtd->priv;
+
+	/* Submit address of first page to unlock */
+	page = ofs >> chip->page_shift;
+	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+	/* Submit address of last page to unlock */
+	page = (ofs + len) >> chip->page_shift;
+	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+				(page | invert) & chip->pagemask);
+
+	/* Call wait ready function */
+	status = chip->waitfunc(mtd, chip);
+	/* See if device thinks it succeeded */
+	if (status & NAND_STATUS_FAIL) {
+		pr_debug("%s: error status = 0x%08x\n",
+					__func__, status);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * Returns unlock status.
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	int ret = 0;
+	int chipnr;
+	struct nand_chip *chip = mtd->priv;
+
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
+			__func__, (unsigned long long)ofs, len);
+
+	if (check_offs_len(mtd, ofs, len))
+		ret = -EINVAL;
+
+	/* Align to last block address if size addresses end of the device */
+	if (ofs + len == mtd->size)
+		len -= mtd->erasesize;
+
+	nand_get_device(mtd, FL_UNLOCKING);
+
+	/* Shift to get chip number */
+	chipnr = ofs >> chip->chip_shift;
+
+	chip->select_chip(mtd, chipnr);
+
+	/* Check, if it is write protected */
+	if (nand_check_wp(mtd)) {
+		pr_debug("%s: device is write protected!\n",
+					__func__);
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+	chip->select_chip(mtd, -1);
+	nand_release_device(mtd);
+
+	return ret;
+}
+EXPORT_SYMBOL(nand_unlock);
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it allows only locking all blocks, not a specified
+ * range of blocks. The 'lock' feature is therefore implemented in terms of
+ * 'unlock', for now.
+ *
+ * Returns lock status.
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	int ret = 0;
+	int chipnr, status, page;
+	struct nand_chip *chip = mtd->priv;
+
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
+			__func__, (unsigned long long)ofs, len);
+
+	if (check_offs_len(mtd, ofs, len))
+		ret = -EINVAL;
+
+	nand_get_device(mtd, FL_LOCKING);
+
+	/* Shift to get chip number */
+	chipnr = ofs >> chip->chip_shift;
+
+	chip->select_chip(mtd, chipnr);
+
+	/* Check, if it is write protected */
+	if (nand_check_wp(mtd)) {
+		pr_debug("%s: device is write protected!\n",
+					__func__);
+		status = MTD_ERASE_FAILED;
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Submit address of first page to lock */
+	page = ofs >> chip->page_shift;
+	chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+	/* Call wait ready function */
+	status = chip->waitfunc(mtd, chip);
+	/* See if device thinks it succeeded */
+	if (status & NAND_STATUS_FAIL) {
+		pr_debug("%s: error status = 0x%08x\n",
+					__func__, status);
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+	chip->select_chip(mtd, -1);
+	nand_release_device(mtd);
+
+	return ret;
+}
+EXPORT_SYMBOL(nand_lock);
+#endif
+
 /**
  * nand_read_page_raw - [INTERN] read raw page data without ecc
  * @mtd: mtd info structure
@@ -906,6 +1312,7 @@
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	uint8_t *ecc_code = chip->buffers->ecccode;
 	uint32_t *eccpos = chip->ecc.layout->eccpos;
+	unsigned int max_bitflips = 0;
 
 	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
 
@@ -922,16 +1329,18 @@
 		int stat;
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
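
Note the pattern introduced here and repeated in every ECC read path below:
instead of 0, a successful per-page read now returns the maximum number of
bitflips corrected in any single ECC step. The MTD core folds that into the
mtd_read() return code by comparing against mtd->bitflip_threshold, reporting
-EUCLEAN ("data OK, but scrub soon") without failing the read. Sketched from
the matching mtdcore code in the same Linux base:

    ret_code = mtd->_read(mtd, from, len, retlen, buf);
    if (unlikely(ret_code < 0))
        return ret_code;            /* hard error, e.g. -EBADMSG */
    if (mtd->ecc_strength == 0)
        return 0;                   /* device lacks ECC */
    return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
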
 
 /**
- * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @data_offs: offset of requested data within the page
@@ -948,6 +1357,7 @@
 	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
 	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
 	int index = 0;
+	unsigned int max_bitflips = 0;
 
 	/* Column address within the page aligned to ECC size (256bytes) */
 	start_step = data_offs / chip->ecc.size;
@@ -1012,12 +1422,14 @@
 
 		stat = chip->ecc.correct(mtd, p,
 			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1040,6 +1452,7 @@
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	uint8_t *ecc_code = chip->buffers->ecccode;
 	uint32_t *eccpos = chip->ecc.layout->eccpos;
+	unsigned int max_bitflips = 0;
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1058,12 +1471,14 @@
 		int stat;
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1090,6 +1505,7 @@
 	uint8_t *ecc_code = chip->buffers->ecccode;
 	uint32_t *eccpos = chip->ecc.layout->eccpos;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	unsigned int max_bitflips = 0;
 
 	/* Read the OOB area first */
 	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1107,12 +1523,14 @@
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1134,6 +1552,7 @@
 	int eccsteps = chip->ecc.steps;
 	uint8_t *p = buf;
 	uint8_t *oob = chip->oob_poi;
+	unsigned int max_bitflips = 0;
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
 		int stat;
@@ -1150,10 +1569,12 @@
 		chip->read_buf(mtd, oob, eccbytes);
 		stat = chip->ecc.correct(mtd, p, oob, NULL);
 
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 
 		oob += eccbytes;
 
@@ -1168,7 +1589,7 @@
 	if (i)
 		chip->read_buf(mtd, oob, i);
 
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1220,6 +1641,30 @@
 }
 
 /**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+	if (retry_mode >= chip->read_retries)
+		return -EINVAL;
+
+	if (!chip->setup_read_retry)
+		return -EOPNOTSUPP;
+
+	return chip->setup_read_retry(mtd, retry_mode);
+}
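
The dispatcher above leaves the actual mode switch to the vendor hook. For
ONFI-style parts this is typically a SET FEATURES write; a sketch modelled on
the Micron hook from the same Linux base (whether chip->onfi_set_features is
populated depends on the part):

    static int example_setup_read_retry(struct mtd_info *mtd, int retry_mode)
    {
        struct nand_chip *chip = mtd->priv;
        uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};

        return chip->onfi_set_features(mtd, chip,
                                       ONFI_FEATURE_ADDR_READ_RETRY, feature);
    }
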
+
+/**
  * nand_do_read_ops - [INTERN] Read data with ECC
  * @mtd: MTD device structure
  * @from: offset to read from
@@ -1232,7 +1677,6 @@
 {
 	int chipnr, page, realpage, col, bytes, aligned, oob_required;
 	struct nand_chip *chip = mtd->priv;
-	struct mtd_ecc_stats stats;
 	int ret = 0;
 	uint32_t readlen = ops->len;
 	uint32_t oobreadlen = ops->ooblen;
@@ -1241,8 +1685,8 @@
 
 	uint8_t *bufpoi, *oob, *buf;
 	unsigned int max_bitflips = 0;
-
-	stats = mtd->ecc_stats;
+	int retry_mode = 0;
+	bool ecc_fail = false;
 
 	chipnr = (int)(from >> chip->chip_shift);
 	chip->select_chip(mtd, chipnr);
@@ -1257,8 +1701,9 @@
 	oob_required = oob ? 1 : 0;
 
 	while (1) {
-		WATCHDOG_RESET();
+		unsigned int ecc_failures = mtd->ecc_stats.failed;
 
+		WATCHDOG_RESET();
 		bytes = min(mtd->writesize - col, readlen);
 		aligned = (bytes == mtd->writesize);
 
@@ -1266,6 +1711,7 @@
 		if (realpage != chip->pagebuf || oob) {
 			bufpoi = aligned ? buf : chip->buffers->databuf;
 
+read_retry:
 			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
 
 			/*
@@ -1277,7 +1723,7 @@
 							      oob_required,
 							      page);
 			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
-			    !oob)
+				 !oob)
 				ret = chip->ecc.read_subpage(mtd, chip,
 							col, bytes, bufpoi);
 			else
@@ -1295,7 +1741,7 @@
 			/* Transfer not aligned data */
 			if (!aligned) {
 				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
-				    !(mtd->ecc_stats.failed - stats.failed) &&
+				    !(mtd->ecc_stats.failed - ecc_failures) &&
 				    (ops->mode != MTD_OPS_RAW)) {
 					chip->pagebuf = realpage;
 					chip->pagebuf_bitflips = ret;
@@ -1306,8 +1752,6 @@
 				memcpy(buf, chip->buffers->databuf + col, bytes);
 			}
 
-			buf += bytes;
-
 			if (unlikely(oob)) {
 				int toread = min(oobreadlen, max_oobsize);
 
@@ -1317,6 +1761,33 @@
 					oobreadlen -= toread;
 				}
 			}
+
+			if (chip->options & NAND_NEED_READRDY) {
+				/* Apply delay or wait for ready/busy pin */
+				if (!chip->dev_ready)
+					udelay(chip->chip_delay);
+				else
+					nand_wait_ready(mtd);
+			}
+
+			if (mtd->ecc_stats.failed - ecc_failures) {
+				if (retry_mode + 1 < chip->read_retries) {
+					retry_mode++;
+					ret = nand_setup_read_retry(mtd,
+							retry_mode);
+					if (ret < 0)
+						break;
+
+					/* Reset failures; retry */
+					mtd->ecc_stats.failed = ecc_failures;
+					goto read_retry;
+				} else {
+					/* No more retry modes; real failure */
+					ecc_fail = true;
+				}
+			}
+
+			buf += bytes;
 		} else {
 			memcpy(buf, chip->buffers->databuf + col, bytes);
 			buf += bytes;
@@ -1326,6 +1797,14 @@
 
 		readlen -= bytes;
 
+		/* Reset to retry mode 0 */
+		if (retry_mode) {
+			ret = nand_setup_read_retry(mtd, 0);
+			if (ret < 0)
+				break;
+			retry_mode = 0;
+		}
+
 		if (!readlen)
 			break;
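
The retry flow above works per page: ecc_failures snapshots
mtd->ecc_stats.failed before each page read; if the counter moved and more
modes remain, it is rolled back and the same page re-read under the next mode,
and only when all modes are exhausted does ecc_fail latch (the request then
completes and returns -EBADMSG). A worked trace, assuming a hypothetical chip
with chip->read_retries == 3:

    /*
     * read page N, mode 0 -> ECC failure: retry_mode = 1, goto read_retry
     * read page N, mode 1 -> ECC failure: retry_mode = 2, goto read_retry
     * read page N, mode 2 -> clean: continue; mode is reset to 0 before
     *                       page N+1, so every page starts from mode 0
     */
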
 
@@ -1342,15 +1821,16 @@
 			chip->select_chip(mtd, chipnr);
 		}
 	}
+	chip->select_chip(mtd, -1);
 
 	ops->retlen = ops->len - (size_t) readlen;
 	if (oob)
 		ops->oobretlen = ops->ooblen - oobreadlen;
 
-	if (ret)
+	if (ret < 0)
 		return ret;
 
-	if (mtd->ecc_stats.failed - stats.failed)
+	if (ecc_fail)
 		return -EBADMSG;
 
 	return max_bitflips;
@@ -1369,11 +1849,10 @@
 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
 		     size_t *retlen, uint8_t *buf)
 {
-	struct nand_chip *chip = mtd->priv;
 	struct mtd_oob_ops ops;
 	int ret;
 
-	nand_get_device(chip, mtd, FL_READING);
+	nand_get_device(mtd, FL_READING);
 	ops.len = len;
 	ops.datbuf = buf;
 	ops.oobbuf = NULL;
@@ -1537,7 +2016,7 @@
 	uint8_t *buf = ops->oobbuf;
 	int ret = 0;
 
-	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
+	pr_debug("%s: from = 0x%08Lx, len = %i\n",
 			__func__, (unsigned long long)from, readlen);
 
 	stats = mtd->ecc_stats;
@@ -1548,8 +2027,8 @@
 		len = mtd->oobsize;
 
 	if (unlikely(ops->ooboffs >= len)) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read "
-					"outside oob\n", __func__);
+		pr_debug("%s: attempt to start read outside oob\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -1557,8 +2036,8 @@
 	if (unlikely(from >= mtd->size ||
 		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
 					(from >> chip->page_shift)) * len)) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end "
-					"of device\n", __func__);
+		pr_debug("%s: attempt to read beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -1571,6 +2050,7 @@
 
 	while (1) {
 		WATCHDOG_RESET();
+
 		if (ops->mode == MTD_OPS_RAW)
 			ret = chip->ecc.read_oob_raw(mtd, chip, page);
 		else
@@ -1582,6 +2062,14 @@
 		len = min(len, readlen);
 		buf = nand_transfer_oob(chip, buf, ops, len);
 
+		if (chip->options & NAND_NEED_READRDY) {
+			/* Apply delay or wait for ready/busy pin */
+			if (!chip->dev_ready)
+				udelay(chip->chip_delay);
+			else
+				nand_wait_ready(mtd);
+		}
+
 		readlen -= len;
 		if (!readlen)
 			break;
@@ -1597,6 +2085,7 @@
 			chip->select_chip(mtd, chipnr);
 		}
 	}
+	chip->select_chip(mtd, -1);
 
 	ops->oobretlen = ops->ooblen - readlen;
 
@@ -1620,19 +2109,18 @@
 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
 			 struct mtd_oob_ops *ops)
 {
-	struct nand_chip *chip = mtd->priv;
 	int ret = -ENOTSUPP;
 
 	ops->retlen = 0;
 
 	/* Do not allow reads past end of device */
 	if (ops->datbuf && (from + ops->len) > mtd->size) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read "
-				"beyond end of device\n", __func__);
+		pr_debug("%s: attempt to read beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
-	nand_get_device(chip, mtd, FL_READING);
+	nand_get_device(mtd, FL_READING);
 
 	switch (ops->mode) {
 	case MTD_OPS_PLACE_OOB:
@@ -1774,6 +2262,68 @@
 	return 0;
 }
 
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure
+ * @offset:	column address of subpage within the page
+ * @data_len:	data length
+ * @buf:	data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+				struct nand_chip *chip, uint32_t offset,
+				uint32_t data_len, const uint8_t *buf,
+				int oob_required)
+{
+	uint8_t *oob_buf  = chip->oob_poi;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	int ecc_size      = chip->ecc.size;
+	int ecc_bytes     = chip->ecc.bytes;
+	int ecc_steps     = chip->ecc.steps;
+	uint32_t *eccpos  = chip->ecc.layout->eccpos;
+	uint32_t start_step = offset / ecc_size;
+	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
+	int oob_bytes       = mtd->oobsize / ecc_steps;
+	int step, i;
+
+	for (step = 0; step < ecc_steps; step++) {
+		/* configure controller for WRITE access */
+		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+		/* write data (untouched subpages already masked by 0xFF) */
+		chip->write_buf(mtd, buf, ecc_size);
+
+		/* mask ECC of un-touched subpages by padding 0xFF */
+		if ((step < start_step) || (step > end_step))
+			memset(ecc_calc, 0xff, ecc_bytes);
+		else
+			chip->ecc.calculate(mtd, buf, ecc_calc);
+
+		/* mask OOB of un-touched subpages by padding 0xFF */
+		/* if oob_required, preserve OOB metadata of written subpage */
+		if (!oob_required || (step < start_step) || (step > end_step))
+			memset(oob_buf, 0xff, oob_bytes);
+
+		buf += ecc_size;
+		ecc_calc += ecc_bytes;
+		oob_buf  += oob_bytes;
+	}
+
+	/* copy calculated ECC for whole page to chip->buffer->oob */
+	/* this includes masked values (0xFF) for unwritten subpages */
+	ecc_calc = chip->buffers->ecccalc;
+	for (i = 0; i < chip->ecc.total; i++)
+		chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+	/* write OOB buffer to NAND device */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
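
The masking above is easiest to follow with numbers. A worked example,
assuming a 2048-byte page with 512-byte ECC steps (ecc.steps == 4) and a
caller writing bytes 512..1023 of the page:

    /*
     * offset = 512, data_len = 512
     * start_step = 512 / 512             = 1
     * end_step   = (512 + 512 - 1) / 512 = 1
     *
     * step 0:    outside [1,1] -> ECC and OOB forced to 0xFF
     * step 1:    inside  [1,1] -> real data, calculated ECC; OOB kept
     *                            only if oob_required
     * steps 2,3: as step 0
     *
     * Untouched subpages therefore see nothing but 0xFF (no programming),
     * while subpage 1 is written with valid data and ECC.
     */
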
+
+
 /**
  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
  * @mtd: mtd info structure
@@ -1826,6 +2376,8 @@
  * nand_write_page - [REPLACEABLE] write one page
  * @mtd: MTD device structure
  * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
  * @buf: the data to write
  * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
@@ -1833,15 +2385,25 @@
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			   const uint8_t *buf, int oob_required, int page,
-			   int cached, int raw)
+		uint32_t offset, int data_len, const uint8_t *buf,
+		int oob_required, int page, int cached, int raw)
 {
-	int status;
+	int status, subpage;
+
+	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+		chip->ecc.write_subpage)
+		subpage = offset || (data_len < mtd->writesize);
+	else
+		subpage = 0;
 
 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
 	if (unlikely(raw))
-		status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+		status = chip->ecc.write_page_raw(mtd, chip, buf,
+							oob_required);
+	else if (subpage)
+		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+							 buf, oob_required);
 	else
 		status = chip->ecc.write_page(mtd, chip, buf, oob_required);
 
@@ -1854,7 +2416,7 @@
 	 */
 	cached = 0;
 
-	if (!cached || !(chip->options & NAND_CACHEPRG)) {
+	if (!cached || !NAND_HAS_CACHEPROG(chip)) {
 
 		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 		status = chip->waitfunc(mtd, chip);
@@ -1873,7 +2435,9 @@
 		status = chip->waitfunc(mtd, chip);
 	}
 
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+
+#ifdef __UBOOT__
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 	/* Send command to read back the data */
 	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
 
@@ -1883,6 +2447,8 @@
 	/* Make sure the next page prog is preceded by a status read */
 	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 #endif
+#endif
+
 	return 0;
 }
 
@@ -1965,26 +2531,34 @@
 
 	uint8_t *oob = ops->oobbuf;
 	uint8_t *buf = ops->datbuf;
-	int ret, subpage;
+	int ret;
 	int oob_required = oob ? 1 : 0;
 
 	ops->retlen = 0;
 	if (!writelen)
 		return 0;
 
-	column = to & (mtd->writesize - 1);
-	subpage = column || (writelen & (mtd->writesize - 1));
-
-	if (subpage && oob)
+#ifndef __UBOOT__
+	/* Reject writes, which are not page aligned */
+	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+#else
+	/* Reject writes, which are not page aligned */
+	if (NOTALIGNED(to)) {
+#endif
+		pr_notice("%s: attempt to write non page aligned data\n",
+			   __func__);
 		return -EINVAL;
+	}
+
+	column = to & (mtd->writesize - 1);
 
 	chipnr = (int)(to >> chip->chip_shift);
 	chip->select_chip(mtd, chipnr);
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
-		printk (KERN_NOTICE "nand_do_write_ops: Device is write protected\n");
-		return -EIO;
+		ret = -EIO;
+		goto err_out;
 	}
 
 	realpage = (int)(to >> chip->page_shift);
@@ -1997,18 +2571,19 @@
 		chip->pagebuf = -1;
 
 	/* Don't allow multipage oob writes with offset */
-	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
-		return -EINVAL;
+	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+		ret = -EINVAL;
+		goto err_out;
+	}
 
 	while (1) {
-		WATCHDOG_RESET();
-
 		int bytes = mtd->writesize;
 		int cached = writelen > bytes && page != blockmask;
 		uint8_t *wbuf = buf;
 
+		WATCHDOG_RESET();
 		/* Partial page write? */
-		if (unlikely(column || writelen < mtd->writesize)) {
+		if (unlikely(column || writelen < (mtd->writesize - 1))) {
 			cached = 0;
 			bytes = min_t(int, bytes - column, (int) writelen);
 			chip->pagebuf = -1;
@@ -2025,9 +2600,9 @@
 			/* We still need to erase leftover OOB data */
 			memset(chip->oob_poi, 0xff, mtd->oobsize);
 		}
-
-		ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
-				       cached, (ops->mode == MTD_OPS_RAW));
+		ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+					oob_required, page, cached,
+					(ops->mode == MTD_OPS_RAW));
 		if (ret)
 			break;
 
@@ -2051,6 +2626,44 @@
 	ops->retlen = ops->len - writelen;
 	if (unlikely(oob))
 		ops->oobretlen = ops->ooblen;
+
+err_out:
+	chip->select_chip(mtd, -1);
+	return ret;
+}
+
+/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, const uint8_t *buf)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct mtd_oob_ops ops;
+	int ret;
+
+	/* Wait for the device to get ready */
+	panic_nand_wait(mtd, chip, 400);
+
+	/* Grab the device */
+	panic_nand_get_device(chip, mtd, FL_WRITING);
+
+	ops.len = len;
+	ops.datbuf = (uint8_t *)buf;
+	ops.oobbuf = NULL;
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	ret = nand_do_write_ops(mtd, to, &ops);
+
+	*retlen = ops.retlen;
 	return ret;
 }
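
Unlike nand_write() below, panic_nand_write() busy-waits for the controller (panic_nand_wait) and grabs it without sleeping (panic_nand_get_device), which is what makes it usable once the system is dying. A sketch of how a panic-path client such as mtdoops would reach it through the generic MTD API; the helper name here is hypothetical:

	static void oops_flush(struct mtd_info *mtd, loff_t to,
			       const u_char *rec, size_t len)
	{
		size_t retlen;

		/* mtd_panic_write() dispatches to mtd->_panic_write, which
		 * nand_scan_tail() points at panic_nand_write() */
		mtd_panic_write(mtd, to, len, &retlen, rec);
	}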
 
@@ -2067,11 +2680,10 @@
 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
 			  size_t *retlen, const uint8_t *buf)
 {
-	struct nand_chip *chip = mtd->priv;
 	struct mtd_oob_ops ops;
 	int ret;
 
-	nand_get_device(chip, mtd, FL_WRITING);
+	nand_get_device(mtd, FL_WRITING);
 	ops.len = len;
 	ops.datbuf = (uint8_t *)buf;
 	ops.oobbuf = NULL;
@@ -2096,7 +2708,7 @@
 	int chipnr, page, status, len;
 	struct nand_chip *chip = mtd->priv;
 
-	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+	pr_debug("%s: to = 0x%08x, len = %i\n",
 			 __func__, (unsigned int)to, (int)ops->ooblen);
 
 	if (ops->mode == MTD_OPS_AUTO_OOB)
@@ -2106,14 +2718,14 @@
 
 	/* Do not allow write past end of page */
 	if ((ops->ooboffs + ops->ooblen) > len) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write "
-				"past end of page\n", __func__);
+		pr_debug("%s: attempt to write past end of page\n",
+				__func__);
 		return -EINVAL;
 	}
 
 	if (unlikely(ops->ooboffs >= len)) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start "
-				"write outside oob\n", __func__);
+		pr_debug("%s: attempt to start write outside oob\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -2122,8 +2734,8 @@
 		     ops->ooboffs + ops->ooblen >
 			((mtd->size >> chip->page_shift) -
 			 (to >> chip->page_shift)) * len)) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
-				"end of device\n", __func__);
+		pr_debug("%s: attempt to write beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -2142,8 +2754,10 @@
 	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
 
 	/* Check, if it is write protected */
-	if (nand_check_wp(mtd))
+	if (nand_check_wp(mtd)) {
+		chip->select_chip(mtd, -1);
 		return -EROFS;
+	}
 
 	/* Invalidate the page cache, if we write to the cached page */
 	if (page == chip->pagebuf)
@@ -2156,6 +2770,8 @@
 	else
 		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
 
+	chip->select_chip(mtd, -1);
+
 	if (status)
 		return status;
 
@@ -2173,19 +2789,18 @@
 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
 			  struct mtd_oob_ops *ops)
 {
-	struct nand_chip *chip = mtd->priv;
 	int ret = -ENOTSUPP;
 
 	ops->retlen = 0;
 
 	/* Do not allow writes past end of device */
 	if (ops->datbuf && (to + ops->len) > mtd->size) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
-				"end of device\n", __func__);
+		pr_debug("%s: attempt to write beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
-	nand_get_device(chip, mtd, FL_WRITING);
+	nand_get_device(mtd, FL_WRITING);
 
 	switch (ops->mode) {
 	case MTD_OPS_PLACE_OOB:
@@ -2223,24 +2838,6 @@
 }
 
 /**
- * multi_erase_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * AND multi block erase command function. Erase 4 consecutive blocks.
- */
-static void multi_erase_cmd(struct mtd_info *mtd, int page)
-{
-	struct nand_chip *chip = mtd->priv;
-	/* Send commands to erase a block */
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
-/**
  * nand_erase - [MTD Interface] erase block(s)
  * @mtd: MTD device structure
  * @instr: erase instruction
@@ -2252,7 +2849,6 @@
 	return nand_erase_nand(mtd, instr, 0);
 }
 
-#define BBT_PAGE_MASK	0xffffff3f
 /**
  * nand_erase_nand - [INTERN] erase block(s)
  * @mtd: MTD device structure
@@ -2266,19 +2862,17 @@
 {
 	int page, status, pages_per_block, ret, chipnr;
 	struct nand_chip *chip = mtd->priv;
-	loff_t rewrite_bbt[CONFIG_SYS_NAND_MAX_CHIPS] = {0};
-	unsigned int bbt_masked_page = 0xffffffff;
 	loff_t len;
 
-	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
-				__func__, (unsigned long long)instr->addr,
-				(unsigned long long)instr->len);
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
+			__func__, (unsigned long long)instr->addr,
+			(unsigned long long)instr->len);
 
 	if (check_offs_len(mtd, instr->addr, instr->len))
 		return -EINVAL;
 
 	/* Grab the lock and see if the device is available */
-	nand_get_device(chip, mtd, FL_ERASING);
+	nand_get_device(mtd, FL_ERASING);
 
 	/* Shift to get first page */
 	page = (int)(instr->addr >> chip->page_shift);
@@ -2292,21 +2886,12 @@
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
-					__func__);
+		pr_debug("%s: device is write protected!\n",
+				__func__);
 		instr->state = MTD_ERASE_FAILED;
 		goto erase_exit;
 	}
 
-	/*
-	 * If BBT requires refresh, set the BBT page mask to see if the BBT
-	 * should be rewritten. Otherwise the mask is set to 0xffffffff which
-	 * can not be matched. This is also done when the bbt is actually
-	 * erased to avoid recursive updates.
-	 */
-	if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
-		bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
-
 	/* Loop through the pages */
 	len = instr->len;
 
@@ -2314,11 +2899,12 @@
 
 	while (len) {
 		WATCHDOG_RESET();
+
 		/* Check if we have a bad block, we do not erase bad blocks! */
-		if (!instr->scrub && nand_block_checkbad(mtd, ((loff_t) page) <<
+		if (nand_block_checkbad(mtd, ((loff_t) page) <<
 					chip->page_shift, 0, allowbbt)) {
 			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
-				   __func__, page);
+				    __func__, page);
 			instr->state = MTD_ERASE_FAILED;
 			goto erase_exit;
 		}
@@ -2345,25 +2931,16 @@
 
 		/* See if block erase succeeded */
 		if (status & NAND_STATUS_FAIL) {
-			MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, "
-					"page 0x%08x\n", __func__, page);
+			pr_debug("%s: failed erase, page 0x%08x\n",
+					__func__, page);
 			instr->state = MTD_ERASE_FAILED;
 			instr->fail_addr =
 				((loff_t)page << chip->page_shift);
 			goto erase_exit;
 		}
 
-		/*
-		 * If BBT requires refresh, set the BBT rewrite flag to the
-		 * page being erased.
-		 */
-		if (bbt_masked_page != 0xffffffff &&
-		    (page & BBT_PAGE_MASK) == bbt_masked_page)
-			rewrite_bbt[chipnr] =
-				((loff_t)page << chip->page_shift);
-
 		/* Increment page address and decrement length */
-		len -= (1 << chip->phys_erase_shift);
+		len -= (1ULL << chip->phys_erase_shift);
 		page += pages_per_block;
 
 		/* Check, if we cross a chip boundary */
@@ -2371,15 +2948,6 @@
 			chipnr++;
 			chip->select_chip(mtd, -1);
 			chip->select_chip(mtd, chipnr);
-
-			/*
-			 * If BBT requires refresh and BBT-PERCHIP, set the BBT
-			 * page mask to see if this BBT should be rewritten.
-			 */
-			if (bbt_masked_page != 0xffffffff &&
-			    (chip->bbt_td->options & NAND_BBT_PERCHIP))
-				bbt_masked_page = chip->bbt_td->pages[chipnr] &
-					BBT_PAGE_MASK;
 		}
 	}
 	instr->state = MTD_ERASE_DONE;
@@ -2389,29 +2957,13 @@
 	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
 
 	/* Deselect and wake up anyone waiting on the device */
+	chip->select_chip(mtd, -1);
 	nand_release_device(mtd);
 
 	/* Do call back function */
 	if (!ret)
 		mtd_erase_callback(instr);
 
-	/*
-	 * If BBT requires refresh and erase was successful, rewrite any
-	 * selected bad block tables.
-	 */
-	if (bbt_masked_page == 0xffffffff || ret)
-		return ret;
-
-	for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
-		if (!rewrite_bbt[chipnr])
-			continue;
-		/* Update the BBT for chip */
-		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt "
-			"(%d:0x%0llx 0x%0x)\n", __func__, chipnr,
-			rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]);
-		nand_update_bbt(mtd, rewrite_bbt[chipnr]);
-	}
-
 	/* Return more or less happy */
 	return ret;
 }
@@ -2424,12 +2976,10 @@
  */
 static void nand_sync(struct mtd_info *mtd)
 {
-	struct nand_chip *chip = mtd->priv;
-
-	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
+	pr_debug("%s: called\n", __func__);
 
 	/* Grab the lock and see if the device is available */
-	nand_get_device(chip, mtd, FL_SYNCING);
+	nand_get_device(mtd, FL_SYNCING);
 	/* Release it and go back */
 	nand_release_device(mtd);
 }
@@ -2451,7 +3001,6 @@
  */
 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
-	struct nand_chip *chip = mtd->priv;
 	int ret;
 
 	ret = nand_block_isbad(mtd, ofs);
@@ -2462,10 +3011,10 @@
 		return ret;
 	}
 
-	return chip->block_markbad(mtd, ofs);
+	return nand_block_markbad_lowlevel(mtd, ofs);
 }
 
- /**
+/**
  * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
  * @mtd: MTD device structure
  * @chip: nand chip info structure
@@ -2476,12 +3025,19 @@
 			int addr, uint8_t *subfeature_param)
 {
 	int status;
+	int i;
 
-	if (!chip->onfi_version)
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
 		return -EINVAL;
+#endif
 
 	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
-	chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		chip->write_byte(mtd, subfeature_param[i]);
+
 	status = chip->waitfunc(mtd, chip);
 	if (status & NAND_STATUS_FAIL)
 		return -EIO;
@@ -2498,17 +3054,50 @@
 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
 			int addr, uint8_t *subfeature_param)
 {
-	if (!chip->onfi_version)
+	int i;
+
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
 		return -EINVAL;
+#endif
 
 	/* clear the sub feature parameters */
 	memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
 
 	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
-	chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		*subfeature_param++ = chip->read_byte(mtd);
 	return 0;
 }
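
Both helpers now move the four parameter bytes one at a time through chip->write_byte()/read_byte() instead of the buffer operations, sidestepping write_buf()'s bus-width handling (ONFI feature parameters are always single bytes). A sketch of a SET/GET FEATURES round trip, reusing the read-retry feature address that the Micron code further down depends on; the mode value is illustrative:

	uint8_t sub[ONFI_SUBFEATURE_PARAM_LEN] = { 1, 0, 0, 0 };

	if (!chip->onfi_set_features(mtd, chip,
				     ONFI_FEATURE_ADDR_READ_RETRY, sub)) {
		chip->onfi_get_features(mtd, chip,
					ONFI_FEATURE_ADDR_READ_RETRY, sub);
		/* sub[0] now holds the mode the device actually accepted */
	}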
 
+#ifndef __UBOOT__
+/**
+ * nand_suspend - [MTD Interface] Suspend the NAND flash
+ * @mtd: MTD device structure
+ */
+static int nand_suspend(struct mtd_info *mtd)
+{
+	return nand_get_device(mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * nand_resume - [MTD Interface] Resume the NAND flash
+ * @mtd: MTD device structure
+ */
+static void nand_resume(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	if (chip->state == FL_PM_SUSPENDED)
+		nand_release_device(mtd);
+	else
+		pr_err("%s called for a chip which is not in suspended state\n",
+			__func__);
+}
+#endif
+
 /* Set default functions */
 static void nand_set_defaults(struct nand_chip *chip, int busw)
 {
@@ -2526,7 +3115,15 @@
 
 	if (!chip->select_chip)
 		chip->select_chip = nand_select_chip;
-	if (!chip->read_byte)
+
+	/* set for ONFI nand */
+	if (!chip->onfi_set_features)
+		chip->onfi_set_features = nand_onfi_set_features;
+	if (!chip->onfi_get_features)
+		chip->onfi_get_features = nand_onfi_get_features;
+
+	/* If called twice, pointers that depend on busw may need to be reset */
+	if (!chip->read_byte || chip->read_byte == nand_read_byte)
 		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
 	if (!chip->read_word)
 		chip->read_word = nand_read_word;
@@ -2534,21 +3131,36 @@
 		chip->block_bad = nand_block_bad;
 	if (!chip->block_markbad)
 		chip->block_markbad = nand_default_block_markbad;
-	if (!chip->write_buf)
+	if (!chip->write_buf || chip->write_buf == nand_write_buf)
 		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
-	if (!chip->read_buf)
+	if (!chip->write_byte || chip->write_byte == nand_write_byte)
+		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
+	if (!chip->read_buf || chip->read_buf == nand_read_buf)
 		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
-	if (!chip->verify_buf)
-		chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
 	if (!chip->scan_bbt)
 		chip->scan_bbt = nand_default_bbt;
-	if (!chip->controller)
+#ifdef __UBOOT__
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
+	if (!chip->verify_buf)
+		chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
+#endif
+#endif
+
+	if (!chip->controller) {
 		chip->controller = &chip->hwcontrol;
+		spin_lock_init(&chip->controller->lock);
+		init_waitqueue_head(&chip->controller->wq);
+	}
 }
 
 #ifdef CONFIG_SYS_NAND_ONFI_DETECTION
 /* Sanitize ONFI strings so we can safely print them */
+#ifndef __UBOOT__
+static void sanitize_string(uint8_t *s, size_t len)
+#else
 static void sanitize_string(char *s, size_t len)
+#endif
 {
 	ssize_t i;
 
@@ -2577,6 +3189,105 @@
 	return crc;
 }
 
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+		struct nand_chip *chip, struct nand_onfi_params *p)
+{
+	struct onfi_ext_param_page *ep;
+	struct onfi_ext_section *s;
+	struct onfi_ext_ecc_info *ecc;
+	uint8_t *cursor;
+	int ret = -EINVAL;
+	int len;
+	int i;
+
+	len = le16_to_cpu(p->ext_param_page_length) * 16;
+	ep = kmalloc(len, GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	/* Send our own NAND_CMD_PARAM. */
+	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+
+	/* Use the Change Read Column command to skip the ONFI param pages. */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+			sizeof(*p) * p->num_of_param_pages, -1);
+
+	/* Read out the Extended Parameter Page. */
+	chip->read_buf(mtd, (uint8_t *)ep, len);
+	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+		!= le16_to_cpu(ep->crc))) {
+		pr_debug("CRC check failed\n");
+		goto ext_out;
+	}
+
+	/*
+	 * Check the signature.
+	 * We do not strictly follow the ONFI spec here; this may
+	 * change in the future.
+	 */
+#ifndef __UBOOT__
+	if (strncmp(ep->sig, "EPPS", 4)) {
+#else
+	if (strncmp((char *)ep->sig, "EPPS", 4)) {
+#endif
+		pr_debug("The signature is invalid.\n");
+		goto ext_out;
+	}
+
+	/* find the ECC section. */
+	cursor = (uint8_t *)(ep + 1);
+	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+		s = ep->sections + i;
+		if (s->type == ONFI_SECTION_TYPE_2)
+			break;
+		cursor += s->length * 16;
+	}
+	if (i == ONFI_EXT_SECTION_MAX) {
+		pr_debug("cannot find the ECC section\n");
+		goto ext_out;
+	}
+
+	/* get the info we want. */
+	ecc = (struct onfi_ext_ecc_info *)cursor;
+
+	if (!ecc->codeword_size) {
+		pr_debug("Invalid codeword size\n");
+		goto ext_out;
+	}
+
+	chip->ecc_strength_ds = ecc->ecc_bits;
+	chip->ecc_step_ds = 1 << ecc->codeword_size;
+	ret = 0;
+
+ext_out:
+	kfree(ep);
+	return ret;
+}
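
One easy-to-miss detail in the routine above: the two parameter pages keep their CRCs in different places. Side by side (both checks come from this file, only the "ok" variable is added for illustration):

	/* base page: CRC is the *last* two bytes, over bytes 0..253 */
	ok = onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
						le16_to_cpu(p->crc);

	/* extended page: CRC is the *first* two bytes, over everything
	 * after it, hence the "+ 2" and "len - 2" above */
	ok = onfi_crc16(ONFI_CRC_BASE, (uint8_t *)ep + 2, len - 2) ==
						le16_to_cpu(ep->crc);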
+
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+	struct nand_chip *chip = mtd->priv;
+	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+	return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+			feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+		struct nand_onfi_params *p)
+{
+	struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+	if (le16_to_cpu(p->vendor_revision) < 1)
+		return;
+
+	chip->read_retries = micron->read_retry_options;
+	chip->setup_read_retry = nand_setup_read_retry_micron;
+}
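
chip->read_retries and chip->setup_read_retry are consumed on the read path: when ECC correction fails, the core steps through the vendor retry modes before reporting the page unreadable. A simplified sketch of that consumer, not the literal loop from this resync:

	int retry_mode = 0;

	while (ecc_failed && retry_mode < chip->read_retries) {
		retry_mode++;
		chip->setup_read_retry(mtd, retry_mode);
		/* ... re-read the page and re-run ECC ... */
	}
	if (retry_mode)
		chip->setup_read_retry(mtd, 0);	/* back to the default mode */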
+
 /*
  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  */
@@ -2593,19 +3304,29 @@
 		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
 		return 0;
 
+	/*
+	 * ONFI must be probed in 8-bit mode or with NAND_BUSWIDTH_AUTO, not
+	 * with NAND_BUSWIDTH_16
+	 */
+	if (chip->options & NAND_BUSWIDTH_16) {
+		pr_err("ONFI cannot be probed in 16-bit mode; aborting\n");
+		return 0;
+	}
+
 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
 	for (i = 0; i < 3; i++) {
 		for (j = 0; j < sizeof(*p); j++)
 			((uint8_t *)p)[j] = chip->read_byte(mtd);
 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
 				le16_to_cpu(p->crc)) {
-			pr_info("ONFI param page %d valid\n", i);
 			break;
 		}
 	}
 
-	if (i == 3)
+	if (i == 3) {
+		pr_err("Could not find valid ONFI parameter page; aborting\n");
 		return 0;
+	}
 
 	/* Check version */
 	val = le16_to_cpu(p->revision);
@@ -2619,11 +3340,9 @@
 		chip->onfi_version = 20;
 	else if (val & (1 << 1))
 		chip->onfi_version = 10;
-	else
-		chip->onfi_version = 0;
 
 	if (!chip->onfi_version) {
-		pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
+		pr_info("unsupported ONFI version: %d\n", val);
 		return 0;
 	}
 
@@ -2631,21 +3350,58 @@
 	sanitize_string(p->model, sizeof(p->model));
 	if (!mtd->name)
 		mtd->name = p->model;
-	mtd->writesize = le32_to_cpu(p->byte_per_page);
-	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
-	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-	chip->chipsize = le32_to_cpu(p->blocks_per_lun);
-	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
-	*busw = 0;
-	if (le16_to_cpu(p->features) & 1)
-		*busw = NAND_BUSWIDTH_16;
 
-	pr_info("ONFI flash detected\n");
+	mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+	/*
+	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
+	 * (don't ask me who thought of this...). MTD assumes that these
+	 * dimensions will be power-of-2, so just truncate the remaining area.
+	 */
+	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+	mtd->erasesize *= mtd->writesize;
+
+	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+	/* See erasesize comment */
+	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+	chip->bits_per_cell = p->bits_per_cell;
+
+	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
+		*busw = NAND_BUSWIDTH_16;
+	else
+		*busw = 0;
+
+	if (p->ecc_bits != 0xff) {
+		chip->ecc_strength_ds = p->ecc_bits;
+		chip->ecc_step_ds = 512;
+	} else if (chip->onfi_version >= 21 &&
+		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+		/*
+		 * nand_flash_detect_ext_param_page() uses the Change Read
+		 * Column command, which may not be supported by the current
+		 * chip->cmdfunc, so try to update chip->cmdfunc now. We do
+		 * not replace a user-supplied command function.
+		 */
+		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+			chip->cmdfunc = nand_command_lp;
+
+		/* The Extended Parameter Page is supported since ONFI 2.1. */
+		if (nand_flash_detect_ext_param_page(mtd, chip, p))
+			pr_warn("Failed to detect ONFI extended param page\n");
+	} else {
+		pr_warn("Could not retrieve ONFI ECC requirements\n");
+	}
+
+	if (p->jedec_id == NAND_MFR_MICRON)
+		nand_onfi_detect_micron(chip, p);
+
 	return 1;
 }
 #else
-static inline int nand_flash_detect_onfi(struct mtd_info *mtd,
-					struct nand_chip *chip,
+static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
 					int *busw)
 {
 	return 0;
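
The power-of-2 truncation in nand_flash_detect_onfi() above deserves a worked example. For a hypothetical part reporting 96 pages per block with a 4KiB page:

	unsigned int pages_per_block = 96;	/* not a power of 2 */
	unsigned int writesize = 4096;
	unsigned int erasesize;

	erasesize = 1 << (fls(pages_per_block) - 1);	/* fls(96) = 7 -> 64 */
	erasesize *= writesize;				/* 256 KiB */
	/* the trailing 32 pages of each block are simply never used */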
@@ -2660,7 +3416,7 @@
  *
  * Check if an ID string is repeated within a given sequence of bytes at
  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
- * period of 2). This is a helper function for nand_id_len(). Returns non-zero
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
  * if the repetition has a period of @period; otherwise, returns zero.
  */
 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
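
A worked example of the period corrected in the comment above, using the ID string it quotes:

	u8 id_data[] = { 0x20, 0x01, 0x7F, 0x20 };

	/* period 2 would need id[0] == id[2], but 0x20 != 0x7F */
	nand_id_has_period(id_data, 4, 2);	/* -> 0 */
	/* period 3 only needs id[0] == id[3], and 0x20 == 0x20 */
	nand_id_has_period(id_data, 4, 3);	/* -> non-zero */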
@@ -2711,6 +3467,16 @@
 	return arrlen;
 }
 
+/* Extract the bits of per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+	int bits;
+
+	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+	bits >>= NAND_CI_CELLTYPE_SHIFT;
+	return bits + 1;
+}
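
Assuming the nand.h definitions that come with this resync (NAND_CI_CELLTYPE_MSK = 0x0c, NAND_CI_CELLTYPE_SHIFT = 2), the decode works out as:

	nand_get_bits_per_cell(0x00);	/* bits [3:2] = 0 -> 1 bit/cell, SLC  */
	nand_get_bits_per_cell(0x04);	/* bits [3:2] = 1 -> 2 bits/cell, MLC */
	/* nand_is_slc(chip) then reduces to (chip->bits_per_cell == 1) */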
+
 /*
  * Many new NAND share similar device ID codes, which represent the size of the
  * chip. The rest of the parameters must be decoded according to generic or
@@ -2721,7 +3487,7 @@
 {
 	int extid, id_len;
 	/* The 3rd id byte holds MLC / multichip data */
-	chip->cellinfo = id_data[2];
+	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
 	/* The 4th id byte is the important one */
 	extid = id_data[3];
 
@@ -2737,8 +3503,7 @@
 	 * ID to decide what to do.
 	 */
 	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
-			(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
-			id_data[5] != 0x00) {
+			!nand_is_slc(chip) && id_data[5] != 0x00) {
 		/* Calc pagesize */
 		mtd->writesize = 2048 << (extid & 0x03);
 		extid >>= 2;
@@ -2760,9 +3525,12 @@
 			mtd->oobsize = 512;
 			break;
 		case 6:
-		default: /* Other cases are "reserved" (unknown) */
 			mtd->oobsize = 640;
 			break;
+		case 7:
+		default: /* Other cases are "reserved" (unknown) */
+			mtd->oobsize = 1024;
+			break;
 		}
 		extid >>= 2;
 		/* Calc blocksize */
@@ -2770,7 +3538,7 @@
 			(((extid >> 1) & 0x04) | (extid & 0x03));
 		*busw = 0;
 	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
-			(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+			!nand_is_slc(chip)) {
 		unsigned int tmp;
 
 		/* Calc pagesize */
@@ -2823,16 +3591,32 @@
 		extid >>= 2;
 		/* Get buswidth information */
 		*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+		/*
+		 * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
+		 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+		 * follows:
+		 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+		 *                         110b -> 24nm
+		 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
+		 */
+		if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
+				nand_is_slc(chip) &&
+				(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
+				!(id_data[4] & 0x80) /* !BENAND */) {
+			mtd->oobsize = 32 * mtd->writesize >> 9;
+		}
+
 	}
 }
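
The Toshiba special case keeps the "32B OOB per 512B page" ratio whatever the real page size:

	/* writesize = 2048: 32 * 2048 >> 9 = 128 bytes of OOB */
	/* writesize = 4096: 32 * 4096 >> 9 = 256 bytes of OOB */
	mtd->oobsize = 32 * mtd->writesize >> 9;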
 
- /*
+/*
  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
  * decodes a matching ID table entry and assigns the MTD size parameters for
  * the chip.
  */
 static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
-				const struct nand_flash_dev *type, u8 id_data[8],
+				struct nand_flash_dev *type, u8 id_data[8],
 				int *busw)
 {
 	int maf_id = id_data[0];
@@ -2842,6 +3626,9 @@
 	mtd->oobsize = mtd->writesize / 32;
 	*busw = type->options & NAND_BUSWIDTH_16;
 
+	/* All legacy ID NAND are small-page, SLC */
+	chip->bits_per_cell = 1;
+
 	/*
 	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
 	 * some Spansion chips have erasesize that conflicts with size
@@ -2856,7 +3643,7 @@
 	}
 }
 
- /*
+/*
  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
  * heuristic patterns using various detected parameters (e.g., manufacturer,
  * page size, cell-type information).
@@ -2878,11 +3665,11 @@
 	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
 	 * AMD/Spansion, and Macronix.  All others scan only the first page.
 	 */
-	if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+	if (!nand_is_slc(chip) &&
 			(maf_id == NAND_MFR_SAMSUNG ||
 			 maf_id == NAND_MFR_HYNIX))
 		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
-	else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+	else if ((nand_is_slc(chip) &&
 				(maf_id == NAND_MFR_SAMSUNG ||
 				 maf_id == NAND_MFR_HYNIX ||
 				 maf_id == NAND_MFR_TOSHIBA ||
@@ -2893,16 +3680,48 @@
 		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
 }
 
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+	return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+		   struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+#ifndef __UBOOT__
+	if (!strncmp(type->id, id_data, type->id_len)) {
+#else
+	if (!strncmp((char *)type->id, (char *)id_data, type->id_len)) {
+#endif
+		mtd->writesize = type->pagesize;
+		mtd->erasesize = type->erasesize;
+		mtd->oobsize = type->oobsize;
+
+		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+		chip->chipsize = (uint64_t)type->chipsize << 20;
+		chip->options |= type->options;
+		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
+		chip->ecc_step_ds = NAND_ECC_STEP(type);
+
+		*busw = type->options & NAND_BUSWIDTH_16;
+
+		if (!mtd->name)
+			mtd->name = type->name;
+
+		return true;
+	}
+	return false;
+}
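
find_full_id_nand() matches table entries that carry a complete ID byte string plus explicit geometry, rather than a bare device ID. A representative entry in the shape nand_ids.c uses after this resync; every value below is illustrative, not a real part:

	static struct nand_flash_dev sample = {
		.name = "hypothetical 4Gb 3.3V 8-bit",
		.id = { 0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08 },
		.id_len = 8,
		.chipsize = 512,		/* MiB */
		.pagesize = 4096,
		.erasesize = 256 * 1024,
		.oobsize = 224,
		.options = 0,
	};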
+
 /*
  * Get the flash and manufacturer id and lookup if the type is supported.
  */
-static const struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 						  struct nand_chip *chip,
 						  int busw,
 						  int *maf_id, int *dev_id,
-						  const struct nand_flash_dev *type)
+						  struct nand_flash_dev *type)
 {
-	const char *name;
 	int i, maf_idx;
 	u8 id_data[8];
 
@@ -2936,8 +3755,7 @@
 		id_data[i] = chip->read_byte(mtd);
 
 	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
-		pr_info("%s: second ID read did not match "
-			"%02x,%02x against %02x,%02x\n", __func__,
+		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
 			*maf_id, *dev_id, id_data[0], id_data[1]);
 		return ERR_PTR(-ENODEV);
 	}
@@ -2945,9 +3763,14 @@
 	if (!type)
 		type = nand_flash_ids;
 
-	for (; type->name != NULL; type++)
-		if (*dev_id == type->id)
-			break;
+	for (; type->name != NULL; type++) {
+		if (is_full_id_nand(type)) {
+			if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+				goto ident_done;
+		} else if (*dev_id == type->dev_id) {
+				break;
+		}
+	}
 
 	chip->onfi_version = 0;
 	if (!type->name || !type->pagesize) {
@@ -2973,7 +3796,7 @@
 	} else {
 		nand_decode_id(mtd, chip, type, id_data, &busw);
 	}
-	/* Get chip options, preserve non chip based options */
+	/* Get chip options */
 	chip->options |= type->options;
 
 	/*
@@ -2990,15 +3813,19 @@
 			break;
 	}
 
-	/*
-	 * Check, if buswidth is correct. Hardware drivers should set
-	 * chip correct!
-	 */
-	if (busw != (chip->options & NAND_BUSWIDTH_16)) {
-		pr_info("NAND device: Manufacturer ID:"
-			" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
-			*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
-		pr_warn("NAND bus width %d instead %d bit\n",
+	if (chip->options & NAND_BUSWIDTH_AUTO) {
+		WARN_ON(chip->options & NAND_BUSWIDTH_16);
+		chip->options |= busw;
+		nand_set_defaults(chip, busw);
+	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+		/*
+		 * Check, if buswidth is correct. Hardware drivers should set
+		 * chip correct!
+		 */
+		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+			*maf_id, *dev_id);
+		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+		pr_warn("bus width %d instead %d bit\n",
 			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
 			   busw ? 16 : 8);
 		return ERR_PTR(-EINVAL);
@@ -3021,28 +3848,23 @@
 	}
 
 	chip->badblockbits = 8;
-
-	/* Check for AND chips with 4 page planes */
-	if (chip->options & NAND_4PAGE_ARRAY)
-		chip->erase_cmd = multi_erase_cmd;
-	else
-		chip->erase_cmd = single_erase_cmd;
+	chip->erase_cmd = single_erase_cmd;
 
 	/* Do not replace user supplied command function! */
 	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
 		chip->cmdfunc = nand_command_lp;
 
-	name = type->name;
+	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+		*maf_id, *dev_id);
 #ifdef CONFIG_SYS_NAND_ONFI_DETECTION
-	if (chip->onfi_version)
-		name = chip->onfi_params.model;
+	pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+		chip->onfi_version ? chip->onfi_params.model : type->name);
+#else
+	pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, type->name);
 #endif
-	pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
-		" page size: %d, OOB size: %d\n",
-		*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
-		name,
+	pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
+		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
 		mtd->writesize, mtd->oobsize);
-
 	return type;
 }
 
@@ -3058,11 +3880,11 @@
  * The mtd->owner field must be set to the module of the caller.
  */
 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
-		    const struct nand_flash_dev *table)
+		    struct nand_flash_dev *table)
 {
 	int i, busw, nand_maf_id, nand_dev_id;
 	struct nand_chip *chip = mtd->priv;
-	const struct nand_flash_dev *type;
+	struct nand_flash_dev *type;
 
 	/* Get buswidth to select the correct functions */
 	busw = chip->options & NAND_BUSWIDTH_16;
@@ -3074,13 +3896,14 @@
 				&nand_maf_id, &nand_dev_id, table);
 
 	if (IS_ERR(type)) {
-#ifndef CONFIG_SYS_NAND_QUIET_TEST
-		pr_warn("No NAND device found\n");
-#endif
+		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+			pr_warn("No NAND device found\n");
 		chip->select_chip(mtd, -1);
 		return PTR_ERR(type);
 	}
 
+	chip->select_chip(mtd, -1);
+
 	/* Check for a chip array */
 	for (i = 1; i < maxchips; i++) {
 		chip->select_chip(mtd, i);
@@ -3090,12 +3913,16 @@
 		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
 		/* Read manufacturer and device IDs */
 		if (nand_maf_id != chip->read_byte(mtd) ||
-		    nand_dev_id != chip->read_byte(mtd))
+		    nand_dev_id != chip->read_byte(mtd)) {
+			chip->select_chip(mtd, -1);
 			break;
+		}
+		chip->select_chip(mtd, -1);
 	}
+
 #ifdef DEBUG
 	if (i > 1)
-		pr_info("%d NAND chips detected\n", i);
+		pr_info("%d chips detected\n", i);
 #endif
 
 	/* Store the number of chips and calc total size for mtd */
@@ -3104,6 +3931,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(nand_scan_ident);
 
 
 /**
@@ -3118,14 +3946,14 @@
 {
 	int i;
 	struct nand_chip *chip = mtd->priv;
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
 
 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
 	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
 			!(chip->bbt_options & NAND_BBT_USE_FLASH));
 
 	if (!(chip->options & NAND_OWN_BUFFERS))
-		chip->buffers = memalign(ARCH_DMA_MINALIGN,
-					 sizeof(*chip->buffers));
+		chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
 	if (!chip->buffers)
 		return -ENOMEM;
 
@@ -3135,94 +3963,91 @@
 	/*
 	 * If no default placement scheme is given, select an appropriate one.
 	 */
-	if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
+	if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
 		switch (mtd->oobsize) {
 		case 8:
-			chip->ecc.layout = &nand_oob_8;
+			ecc->layout = &nand_oob_8;
 			break;
 		case 16:
-			chip->ecc.layout = &nand_oob_16;
+			ecc->layout = &nand_oob_16;
 			break;
 		case 64:
-			chip->ecc.layout = &nand_oob_64;
+			ecc->layout = &nand_oob_64;
 			break;
 		case 128:
-			chip->ecc.layout = &nand_oob_128;
+			ecc->layout = &nand_oob_128;
 			break;
 		default:
 			pr_warn("No oob scheme defined for oobsize %d\n",
 				   mtd->oobsize);
+			BUG();
 		}
 	}
 
 	if (!chip->write_page)
 		chip->write_page = nand_write_page;
 
-	/* set for ONFI nand */
-	if (!chip->onfi_set_features)
-		chip->onfi_set_features = nand_onfi_set_features;
-	if (!chip->onfi_get_features)
-		chip->onfi_get_features = nand_onfi_get_features;
-
 	/*
 	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
 	 * selected and we have 256 byte pagesize fallback to software ECC
 	 */
 
-	switch (chip->ecc.mode) {
+	switch (ecc->mode) {
 	case NAND_ECC_HW_OOB_FIRST:
 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
-		if (!chip->ecc.calculate || !chip->ecc.correct ||
-		     !chip->ecc.hwctl) {
+		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
 			pr_warn("No ECC functions supplied; "
 				   "hardware ECC not possible\n");
 			BUG();
 		}
-		if (!chip->ecc.read_page)
-			chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_hwecc_oob_first;
 
 	case NAND_ECC_HW:
 		/* Use standard hwecc read page function? */
-		if (!chip->ecc.read_page)
-			chip->ecc.read_page = nand_read_page_hwecc;
-		if (!chip->ecc.write_page)
-			chip->ecc.write_page = nand_write_page_hwecc;
-		if (!chip->ecc.read_page_raw)
-			chip->ecc.read_page_raw = nand_read_page_raw;
-		if (!chip->ecc.write_page_raw)
-			chip->ecc.write_page_raw = nand_write_page_raw;
-		if (!chip->ecc.read_oob)
-			chip->ecc.read_oob = nand_read_oob_std;
-		if (!chip->ecc.write_oob)
-			chip->ecc.write_oob = nand_write_oob_std;
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_hwecc;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_hwecc;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_std;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_std;
+		if (!ecc->read_subpage)
+			ecc->read_subpage = nand_read_subpage;
+		if (!ecc->write_subpage)
+			ecc->write_subpage = nand_write_subpage_hwecc;
 
 	case NAND_ECC_HW_SYNDROME:
-		if ((!chip->ecc.calculate || !chip->ecc.correct ||
-		     !chip->ecc.hwctl) &&
-		    (!chip->ecc.read_page ||
-		     chip->ecc.read_page == nand_read_page_hwecc ||
-		     !chip->ecc.write_page ||
-		     chip->ecc.write_page == nand_write_page_hwecc)) {
+		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+		    (!ecc->read_page ||
+		     ecc->read_page == nand_read_page_hwecc ||
+		     !ecc->write_page ||
+		     ecc->write_page == nand_write_page_hwecc)) {
 			pr_warn("No ECC functions supplied; "
 				   "hardware ECC not possible\n");
 			BUG();
 		}
 		/* Use standard syndrome read/write page function? */
-		if (!chip->ecc.read_page)
-			chip->ecc.read_page = nand_read_page_syndrome;
-		if (!chip->ecc.write_page)
-			chip->ecc.write_page = nand_write_page_syndrome;
-		if (!chip->ecc.read_page_raw)
-			chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
-		if (!chip->ecc.write_page_raw)
-			chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
-		if (!chip->ecc.read_oob)
-			chip->ecc.read_oob = nand_read_oob_syndrome;
-		if (!chip->ecc.write_oob)
-			chip->ecc.write_oob = nand_write_oob_syndrome;
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_syndrome;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_syndrome;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw_syndrome;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw_syndrome;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_syndrome;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_syndrome;
 
-		if (mtd->writesize >= chip->ecc.size) {
-			if (!chip->ecc.strength) {
+		if (mtd->writesize >= ecc->size) {
+			if (!ecc->strength) {
 				pr_warn("Driver must set ecc.strength when using hardware ECC\n");
 				BUG();
 			}
@@ -3230,109 +4055,107 @@
 		}
 		pr_warn("%d byte HW ECC not possible on "
 			   "%d byte page size, fallback to SW ECC\n",
-			   chip->ecc.size, mtd->writesize);
-		chip->ecc.mode = NAND_ECC_SOFT;
+			   ecc->size, mtd->writesize);
+		ecc->mode = NAND_ECC_SOFT;
 
 	case NAND_ECC_SOFT:
-		chip->ecc.calculate = nand_calculate_ecc;
-		chip->ecc.correct = nand_correct_data;
-		chip->ecc.read_page = nand_read_page_swecc;
-		chip->ecc.read_subpage = nand_read_subpage;
-		chip->ecc.write_page = nand_write_page_swecc;
-		chip->ecc.read_page_raw = nand_read_page_raw;
-		chip->ecc.write_page_raw = nand_write_page_raw;
-		chip->ecc.read_oob = nand_read_oob_std;
-		chip->ecc.write_oob = nand_write_oob_std;
-		if (!chip->ecc.size)
-			chip->ecc.size = 256;
-		chip->ecc.bytes = 3;
-		chip->ecc.strength = 1;
+		ecc->calculate = nand_calculate_ecc;
+		ecc->correct = nand_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
+		if (!ecc->size)
+			ecc->size = 256;
+		ecc->bytes = 3;
+		ecc->strength = 1;
 		break;
 
 	case NAND_ECC_SOFT_BCH:
 		if (!mtd_nand_has_bch()) {
 			pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
-			return -EINVAL;
+			BUG();
 		}
-		chip->ecc.calculate = nand_bch_calculate_ecc;
-		chip->ecc.correct = nand_bch_correct_data;
-		chip->ecc.read_page = nand_read_page_swecc;
-		chip->ecc.read_subpage = nand_read_subpage;
-		chip->ecc.write_page = nand_write_page_swecc;
-		chip->ecc.read_page_raw = nand_read_page_raw;
-		chip->ecc.write_page_raw = nand_write_page_raw;
-		chip->ecc.read_oob = nand_read_oob_std;
-		chip->ecc.write_oob = nand_write_oob_std;
+		ecc->calculate = nand_bch_calculate_ecc;
+		ecc->correct = nand_bch_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
 		/*
 		 * Board driver should supply ecc.size and ecc.bytes values to
 		 * select how many bits are correctable; see nand_bch_init()
 		 * for details. Otherwise, default to 4 bits for large page
 		 * devices.
 		 */
-		if (!chip->ecc.size && (mtd->oobsize >= 64)) {
-			chip->ecc.size = 512;
-			chip->ecc.bytes = 7;
+		if (!ecc->size && (mtd->oobsize >= 64)) {
+			ecc->size = 512;
+			ecc->bytes = 7;
 		}
-		chip->ecc.priv = nand_bch_init(mtd,
-					       chip->ecc.size,
-					       chip->ecc.bytes,
-					       &chip->ecc.layout);
-		if (!chip->ecc.priv)
+		ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
+					       &ecc->layout);
+		if (!ecc->priv) {
 			pr_warn("BCH ECC initialization failed!\n");
- 		chip->ecc.strength =
-			chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
+			BUG();
+		}
+		ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
 		break;
 
 	case NAND_ECC_NONE:
 		pr_warn("NAND_ECC_NONE selected by board driver. "
-			"This is not recommended !!\n");
-		chip->ecc.read_page = nand_read_page_raw;
-		chip->ecc.write_page = nand_write_page_raw;
-		chip->ecc.read_oob = nand_read_oob_std;
-		chip->ecc.read_page_raw = nand_read_page_raw;
-		chip->ecc.write_page_raw = nand_write_page_raw;
-		chip->ecc.write_oob = nand_write_oob_std;
-		chip->ecc.size = mtd->writesize;
-		chip->ecc.bytes = 0;
+			   "This is not recommended!\n");
+		ecc->read_page = nand_read_page_raw;
+		ecc->write_page = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->write_oob = nand_write_oob_std;
+		ecc->size = mtd->writesize;
+		ecc->bytes = 0;
+		ecc->strength = 0;
 		break;
 
 	default:
-		pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
+		pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
 		BUG();
 	}
 
 	/* For many systems, the standard OOB write also works for raw */
-	if (!chip->ecc.read_oob_raw)
-		chip->ecc.read_oob_raw = chip->ecc.read_oob;
-	if (!chip->ecc.write_oob_raw)
-		chip->ecc.write_oob_raw = chip->ecc.write_oob;
+	if (!ecc->read_oob_raw)
+		ecc->read_oob_raw = ecc->read_oob;
+	if (!ecc->write_oob_raw)
+		ecc->write_oob_raw = ecc->write_oob;
 
 	/*
 	 * The number of bytes available for a client to place data into
 	 * the out of band area.
 	 */
-	chip->ecc.layout->oobavail = 0;
-	for (i = 0; chip->ecc.layout->oobfree[i].length
-			&& i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
-		chip->ecc.layout->oobavail +=
-			chip->ecc.layout->oobfree[i].length;
-	mtd->oobavail = chip->ecc.layout->oobavail;
+	ecc->layout->oobavail = 0;
+	for (i = 0; ecc->layout->oobfree[i].length
+			&& i < ARRAY_SIZE(ecc->layout->oobfree); i++)
+		ecc->layout->oobavail += ecc->layout->oobfree[i].length;
+	mtd->oobavail = ecc->layout->oobavail;
 
 	/*
 	 * Set the number of read / write steps for one page depending on ECC
 	 * mode.
 	 */
-	chip->ecc.steps = mtd->writesize / chip->ecc.size;
-	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
+	ecc->steps = mtd->writesize / ecc->size;
+	if (ecc->steps * ecc->size != mtd->writesize) {
 		pr_warn("Invalid ECC parameters\n");
 		BUG();
 	}
-	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+	ecc->total = ecc->steps * ecc->bytes;
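
For a hypothetical 2KiB-page device on the SOFT_BCH defaults chosen above, the geometry computed here works out as:

	ecc->size  = 512;			/* large-page BCH default */
	ecc->bytes = 7;
	ecc->steps = 2048 / 512;		/* 4 code words per page */
	ecc->total = ecc->steps * ecc->bytes;	/* 28 ECC bytes per page,
						 * laid out by nand_bch_init() */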
 
 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
-	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
-	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
-		switch (chip->ecc.steps) {
+	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+		switch (ecc->steps) {
 		case 2:
 			mtd->subpage_sft = 1;
 			break;
@@ -3348,36 +4171,42 @@
 	/* Initialize state */
 	chip->state = FL_READY;
 
-	/* De-select the device */
-	chip->select_chip(mtd, -1);
-
 	/* Invalidate the pagebuffer reference */
 	chip->pagebuf = -1;
 
 	/* Large page NAND with SOFT_ECC should support subpage reads */
-	if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
+	if ((ecc->mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
 		chip->options |= NAND_SUBPAGE_READ;
 
 	/* Fill in remaining MTD driver data */
-	mtd->type = MTD_NANDFLASH;
+	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
 	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
 						MTD_CAP_NANDFLASH;
 	mtd->_erase = nand_erase;
+#ifndef __UBOOT__
 	mtd->_point = NULL;
 	mtd->_unpoint = NULL;
+#endif
 	mtd->_read = nand_read;
 	mtd->_write = nand_write;
+	mtd->_panic_write = panic_nand_write;
 	mtd->_read_oob = nand_read_oob;
 	mtd->_write_oob = nand_write_oob;
 	mtd->_sync = nand_sync;
 	mtd->_lock = NULL;
 	mtd->_unlock = NULL;
+#ifndef __UBOOT__
+	mtd->_suspend = nand_suspend;
+	mtd->_resume = nand_resume;
+#endif
 	mtd->_block_isbad = nand_block_isbad;
 	mtd->_block_markbad = nand_block_markbad;
+	mtd->writebufsize = mtd->writesize;
 
 	/* propagate ecc info to mtd_info */
-	mtd->ecclayout = chip->ecc.layout;
-	mtd->ecc_strength = chip->ecc.strength;
+	mtd->ecclayout = ecc->layout;
+	mtd->ecc_strength = ecc->strength;
+	mtd->ecc_step_size = ecc->size;
 	/*
 	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
@@ -3388,10 +4217,24 @@
 
 	/* Check, if we should skip the bad block table scan */
 	if (chip->options & NAND_SKIP_BBTSCAN)
-		chip->options |= NAND_BBT_SCANNED;
+		return 0;
 
-	return 0;
+	/* Build bad block table */
+	return chip->scan_bbt(mtd);
 }
+EXPORT_SYMBOL(nand_scan_tail);
+
+/*
+ * is_module_text_address() isn't exported, and it's mostly a pointless
+ * test if this is a module _anyway_ -- they'd have to try _really_ hard
+ * to call us from in-kernel code if the core NAND support is modular.
+ */
+#ifdef MODULE
+#define caller_is_module() (1)
+#else
+#define caller_is_module() \
+	is_module_text_address((unsigned long)__builtin_return_address(0))
+#endif
 
 /**
  * nand_scan - [NAND Interface] Scan for the NAND device
@@ -3407,12 +4250,20 @@
 {
 	int ret;
 
+	/* Many callers got this wrong, so check for it for a while... */
+	if (!mtd->owner && caller_is_module()) {
+		pr_crit("%s called with NULL mtd->owner!\n", __func__);
+		BUG();
+	}
+
 	ret = nand_scan_ident(mtd, maxchips, NULL);
 	if (!ret)
 		ret = nand_scan_tail(mtd);
 	return ret;
 }
+EXPORT_SYMBOL(nand_scan);
 
+#ifndef __UBOOT__
 /**
  * nand_release - [NAND Interface] Free resources held by the NAND device
  * @mtd: MTD device structure
@@ -3424,10 +4275,7 @@
 	if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
 
-#ifdef CONFIG_MTD_PARTITIONS
-	/* Deregister partitions */
-	del_mtd_partitions(mtd);
-#endif
+	mtd_device_unregister(mtd);
 
 	/* Free bad block table memory */
 	kfree(chip->bbt);
@@ -3439,3 +4287,24 @@
 			& NAND_BBT_DYNAMICSTRUCT)
 		kfree(chip->badblock_pattern);
 }
+EXPORT_SYMBOL_GPL(nand_release);
+
+static int __init nand_base_init(void)
+{
+	led_trigger_register_simple("nand-disk", &nand_led_trigger);
+	return 0;
+}
+
+static void __exit nand_base_exit(void)
+{
+	led_trigger_unregister_simple(nand_led_trigger);
+}
+#endif
+
+module_init(nand_base_init);
+module_exit(nand_base_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 8ef5845..c8f28c7 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -59,17 +59,55 @@
  *
  */
 
-#include <common.h>
-#include <malloc.h>
-#include <linux/compat.h>
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/slab.h>
+#include <linux/types.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/bbm.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
 #include <linux/string.h>
+#else
+#include <common.h>
+#include <malloc.h>
+#include <linux/compat.h>
 
-#include <asm/errno.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#endif
+
+#define BBT_BLOCK_GOOD		0x00
+#define BBT_BLOCK_WORN		0x01
+#define BBT_BLOCK_RESERVED	0x02
+#define BBT_BLOCK_FACTORY_BAD	0x03
+
+#define BBT_ENTRY_MASK		0x03
+#define BBT_ENTRY_SHIFT		2
+
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+	uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+	entry >>= (block & BBT_ENTRY_MASK) * 2;
+	return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+		uint8_t mark)
+{
+	uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+	chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
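
These two helpers replace open-coded shift/mask arithmetic that used to be duplicated throughout this file: the in-memory BBT packs four blocks per byte, two bits each. Worked through for block 5:

	/* block 5 lives in chip->bbt[5 >> 2] == bbt[1], bits [3:2] */
	bbt_mark_entry(chip, 5, BBT_BLOCK_WORN);
	/*   msk = (0x01 & 0x03) << ((5 & 0x03) * 2) = 0x04           */
	/*   bbt[1] |= 0x04                                           */
	bbt_get_entry(chip, 5);
	/*   (bbt[1] >> 2) & 0x03 -> 0x01 == BBT_BLOCK_WORN           */

Note that bbt_mark_entry() only ever ORs bits in, so BBT_BLOCK_GOOD (0x00) is the implicit initial state rather than a mark that can be re-applied.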
 
 static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
 {
@@ -86,33 +124,17 @@
  * @td: search pattern descriptor
  *
  * Check for a pattern at the given place. Used to search bad block tables and
- * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
- * all bytes except the pattern area contain 0xff.
+ * good / bad block identifiers.
  */
 static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
 {
-	int end = 0;
-	uint8_t *p = buf;
-
 	if (td->options & NAND_BBT_NO_OOB)
 		return check_pattern_no_oob(buf, td);
 
-	end = paglen + td->offs;
-	if (td->options & NAND_BBT_SCANEMPTY)
-		if (memchr_inv(p, 0xff, end))
-			return -1;
-	p += end;
-
 	/* Compare the pattern */
-	if (memcmp(p, td->pattern, td->len))
+	if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
 		return -1;
 
-	if (td->options & NAND_BBT_SCANEMPTY) {
-		p += td->len;
-		end += td->len;
-		if (memchr_inv(p, 0xff, len - end))
-			return -1;
-	}
 	return 0;
 }
 
@@ -159,7 +181,7 @@
  * @page: the starting page
  * @num: the number of bbt descriptors to read
  * @td: the bbt description table
- * @offs: offset in the memory table
+ * @offs: block number offset in the table
  *
  * Read the bad block table starting from page.
  */
@@ -209,25 +231,33 @@
 		/* Analyse data */
 		for (i = 0; i < len; i++) {
 			uint8_t dat = buf[i];
-			for (j = 0; j < 8; j += bits, act += 2) {
+			for (j = 0; j < 8; j += bits, act++) {
 				uint8_t tmp = (dat >> j) & msk;
 				if (tmp == msk)
 					continue;
 				if (reserved_block_code && (tmp == reserved_block_code)) {
 					pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
-						 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
-					this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
+						 (loff_t)(offs + act) <<
+						 this->bbt_erase_shift);
+					bbt_mark_entry(this, offs + act,
+							BBT_BLOCK_RESERVED);
 					mtd->ecc_stats.bbtblocks++;
 					continue;
 				}
-				pr_info("nand_read_bbt: Bad block at 0x%012llx\n",
-					(loff_t)((offs << 2) + (act >> 1))
-					<< this->bbt_erase_shift);
+				/*
+				 * Leave this at pr_info for now; once the code
+				 * has matured we can move it to pr_debug.
+				 */
+				pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+					 (loff_t)(offs + act) <<
+					 this->bbt_erase_shift);
 				/* Factory marked bad or worn out? */
 				if (tmp == 0)
-					this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
+					bbt_mark_entry(this, offs + act,
+							BBT_BLOCK_FACTORY_BAD);
 				else
-					this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
+					bbt_mark_entry(this, offs + act,
+							BBT_BLOCK_WORN);
 				mtd->ecc_stats.badblocks++;
 			}
 		}
@@ -262,7 +292,7 @@
 					td, offs);
 			if (res)
 				return res;
-			offs += this->chipsize >> (this->bbt_erase_shift + 2);
+			offs += this->chipsize >> this->bbt_erase_shift;
 		}
 	} else {
 		res = read_bbt(mtd, buf, td->pages[0],
@@ -396,25 +426,6 @@
 	}
 }
 
-/* Scan a given block full */
-static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
-			   loff_t offs, uint8_t *buf, size_t readlen,
-			   int scanlen, int numpages)
-{
-	int ret, j;
-
-	ret = scan_read_oob(mtd, buf, offs, readlen);
-	/* Ignore ECC errors when checking for BBM */
-	if (ret && !mtd_is_bitflip_or_eccerr(ret))
-		return ret;
-
-	for (j = 0; j < numpages; j++, buf += scanlen) {
-		if (check_pattern(buf, scanlen, mtd->writesize, bd))
-			return 1;
-	}
-	return 0;
-}
-
 /* Scan a given block partially */
 static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
 			   loff_t offs, uint8_t *buf, int numpages)
@@ -461,36 +472,19 @@
 	struct nand_bbt_descr *bd, int chip)
 {
 	struct nand_chip *this = mtd->priv;
-	int i, numblocks, numpages, scanlen;
+	int i, numblocks, numpages;
 	int startblock;
 	loff_t from;
-	size_t readlen;
 
 	pr_info("Scanning device for bad blocks\n");
 
-	if (bd->options & NAND_BBT_SCANALLPAGES)
-		numpages = 1 << (this->bbt_erase_shift - this->page_shift);
-	else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+	if (bd->options & NAND_BBT_SCAN2NDPAGE)
 		numpages = 2;
 	else
 		numpages = 1;
 
-	if (!(bd->options & NAND_BBT_SCANEMPTY)) {
-		/* We need only read few bytes from the OOB area */
-		scanlen = 0;
-		readlen = bd->len;
-	} else {
-		/* Full page content should be read */
-		scanlen = mtd->writesize + mtd->oobsize;
-		readlen = numpages * mtd->writesize;
-	}
-
 	if (chip == -1) {
-		/*
-		 * Note that numblocks is 2 * (real numblocks) here, see i+=2
-		 * below as it makes shifting and masking less painful
-		 */
-		numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+		numblocks = mtd->size >> this->bbt_erase_shift;
 		startblock = 0;
 		from = 0;
 	} else {
@@ -499,37 +493,31 @@
 			       chip + 1, this->numchips);
 			return -EINVAL;
 		}
-		numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+		numblocks = this->chipsize >> this->bbt_erase_shift;
 		startblock = chip * numblocks;
 		numblocks += startblock;
-		from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+		from = (loff_t)startblock << this->bbt_erase_shift;
 	}
 
 	if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
 		from += mtd->erasesize - (mtd->writesize * numpages);
 
-	for (i = startblock; i < numblocks;) {
+	for (i = startblock; i < numblocks; i++) {
 		int ret;
 
 		BUG_ON(bd->options & NAND_BBT_NO_OOB);
 
-		if (bd->options & NAND_BBT_SCANALLPAGES)
-			ret = scan_block_full(mtd, bd, from, buf, readlen,
-					      scanlen, numpages);
-		else
-			ret = scan_block_fast(mtd, bd, from, buf, numpages);
-
+		ret = scan_block_fast(mtd, bd, from, buf, numpages);
 		if (ret < 0)
 			return ret;
 
 		if (ret) {
-			this->bbt[i >> 3] |= 0x03 << (i & 0x6);
+			bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
 			pr_warn("Bad eraseblock %d at 0x%012llx\n",
-				  i >> 1, (unsigned long long)from);
+				i, (unsigned long long)from);
 			mtd->ecc_stats.badblocks++;
 		}
 
-		i += 2;
 		from += (1 << this->bbt_erase_shift);
 	}
 	return 0;
@@ -554,7 +542,11 @@
 {
 	struct nand_chip *this = mtd->priv;
 	int i, chips;
+#ifndef __UBOOT__
+	int bits, startblock, block, dir;
+#else
 	int startblock, block, dir;
+#endif
 	int scanlen = mtd->writesize + mtd->oobsize;
 	int bbtblocks;
 	int blocktopage = this->bbt_erase_shift - this->page_shift;
@@ -578,6 +570,11 @@
 		bbtblocks = mtd->size >> this->bbt_erase_shift;
 	}
 
+#ifndef __UBOOT__
+	/* Number of bits for each erase block in the bbt */
+	bits = td->options & NAND_BBT_NRBITS_MSK;
+#endif
+
 	for (i = 0; i < chips; i++) {
 		/* Reset version information */
 		td->version[i] = 0;
@@ -606,8 +603,8 @@
 		if (td->pages[i] == -1)
 			pr_warn("Bad block table not found for chip %d\n", i);
 		else
-			pr_info("Bad block table found at page %d, version 0x%02X\n", td->pages[i],
-				td->version[i]);
+			pr_info("Bad block table found at page %d, version 0x%02X\n",
+				td->pages[i], td->version[i]);
 	}
 	return 0;
 }
@@ -649,9 +646,9 @@
 {
 	struct nand_chip *this = mtd->priv;
 	struct erase_info einfo;
-	int i, j, res, chip = 0;
+	int i, res, chip = 0;
 	int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
-	int nrchips, bbtoffs, pageoffs, ooboffs;
+	int nrchips, pageoffs, ooboffs;
 	uint8_t msk[4];
 	uint8_t rcode = td->reserved_block_code;
 	size_t retlen, len = 0;
@@ -707,10 +704,9 @@
 		for (i = 0; i < td->maxblocks; i++) {
 			int block = startblock + dir * i;
 			/* Check, if the block is bad */
-			switch ((this->bbt[block >> 2] >>
-				 (2 * (block & 0x03))) & 0x03) {
-			case 0x01:
-			case 0x03:
+			switch (bbt_get_entry(this, block)) {
+			case BBT_BLOCK_WORN:
+			case BBT_BLOCK_FACTORY_BAD:
 				continue;
 			}
 			page = block <<
@@ -742,8 +738,6 @@
 		default: return -EINVAL;
 		}
 
-		bbtoffs = chip * (numblocks >> 2);
-
 		to = ((loff_t)page) << this->page_shift;
 
 		/* Must we save the block contents? */
@@ -808,16 +802,12 @@
 			buf[ooboffs + td->veroffs] = td->version[chip];
 
 		/* Walk through the memory table */
-		for (i = 0; i < numblocks;) {
+		for (i = 0; i < numblocks; i++) {
 			uint8_t dat;
-			dat = this->bbt[bbtoffs + (i >> 2)];
-			for (j = 0; j < 4; j++, i++) {
-				int sftcnt = (i << (3 - sft)) & sftmsk;
-				/* Do not store the reserved bbt blocks! */
-				buf[offs + (i >> sft)] &=
-					~(msk[dat & 0x03] << sftcnt);
-				dat >>= 2;
-			}
+			int sftcnt = (i << (3 - sft)) & sftmsk;
+			dat = bbt_get_entry(this, chip * numblocks + i);
+			/* Do not store the reserved bbt blocks! */
+			buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
 		}
 
 		memset(&einfo, 0, sizeof(einfo));
@@ -859,7 +849,6 @@
 {
 	struct nand_chip *this = mtd->priv;
 
-	bd->options &= ~NAND_BBT_SCANEMPTY;
 	return create_bbt(mtd, this->buffers->databuf, bd, -1);
 }
 
@@ -1003,7 +992,7 @@
 {
 	struct nand_chip *this = mtd->priv;
 	int i, j, chips, block, nrblocks, update;
-	uint8_t oldval, newval;
+	uint8_t oldval;
 
 	/* Do we have a bbt per chip? */
 	if (td->options & NAND_BBT_PERCHIP) {
@@ -1020,12 +1009,12 @@
 			if (td->pages[i] == -1)
 				continue;
 			block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
-			block <<= 1;
-			oldval = this->bbt[(block >> 3)];
-			newval = oldval | (0x2 << (block & 0x06));
-			this->bbt[(block >> 3)] = newval;
-			if ((oldval != newval) && td->reserved_block_code)
-				nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
+			oldval = bbt_get_entry(this, block);
+			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+			if ((oldval != BBT_BLOCK_RESERVED) &&
+					td->reserved_block_code)
+				nand_update_bbt(mtd, (loff_t)block <<
+						this->bbt_erase_shift);
 			continue;
 		}
 		update = 0;
@@ -1033,14 +1022,12 @@
 			block = ((i + 1) * nrblocks) - td->maxblocks;
 		else
 			block = i * nrblocks;
-		block <<= 1;
 		for (j = 0; j < td->maxblocks; j++) {
-			oldval = this->bbt[(block >> 3)];
-			newval = oldval | (0x2 << (block & 0x06));
-			this->bbt[(block >> 3)] = newval;
-			if (oldval != newval)
+			oldval = bbt_get_entry(this, block);
+			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+			if (oldval != BBT_BLOCK_RESERVED)
 				update = 1;
-			block += 2;
+			block++;
 		}
 		/*
 		 * If we want reserved blocks to be recorded to flash, and some
@@ -1048,7 +1035,8 @@
 		 * bbts.  This should only happen once.
 		 */
 		if (update && td->reserved_block_code)
-			nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
+			nand_update_bbt(mtd, (loff_t)(block - 1) <<
+					this->bbt_erase_shift);
 	}
 }
 
@@ -1174,13 +1162,13 @@
 }
 
 /**
- * nand_update_bbt - [NAND Interface] update bad block table(s)
+ * nand_update_bbt - update bad block table(s)
  * @mtd: MTD device structure
  * @offs: the offset of the newly marked block
  *
  * The function updates the bad block table(s).
  */
-int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
 {
 	struct nand_chip *this = mtd->priv;
 	int len, res = 0;
@@ -1234,15 +1222,6 @@
  */
 static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
 
-static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
-
-static struct nand_bbt_descr agand_flashbased = {
-	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
-	.offs = 0x20,
-	.len = 6,
-	.pattern = scan_agand_pattern
-};
-
 /* Generic flash bbt descriptors */
 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
 static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
@@ -1327,22 +1306,6 @@
 {
 	struct nand_chip *this = mtd->priv;
 
-	/*
-	 * Default for AG-AND. We must use a flash based bad block table as the
-	 * devices have factory marked _good_ blocks. Erasing those blocks
-	 * leads to loss of the good / bad information, so we _must_ store this
-	 * information in a good / bad table during startup.
-	 */
-	if (this->options & NAND_IS_AND) {
-		/* Use the default pattern descriptors */
-		if (!this->bbt_td) {
-			this->bbt_td = &bbt_main_descr;
-			this->bbt_md = &bbt_mirror_descr;
-		}
-		this->bbt_options |= NAND_BBT_USE_FLASH;
-		return nand_scan_bbt(mtd, &agand_flashbased);
-	}
-
 	/* Is a flash based bad block table requested? */
 	if (this->bbt_options & NAND_BBT_USE_FLASH) {
 		/* Use the default pattern descriptors */
@@ -1375,23 +1338,46 @@
 int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
 {
 	struct nand_chip *this = mtd->priv;
-	int block;
-	uint8_t res;
+	int block, res;
 
-	/* Get block number * 2 */
-	block = (int)(offs >> (this->bbt_erase_shift - 1));
-	res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+	block = (int)(offs >> this->bbt_erase_shift);
+	res = bbt_get_entry(this, block);
 
-	MTDDEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
-	      (unsigned int)offs, block >> 1, res);
+	pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
+			"(block %d) 0x%02x\n",
+			(unsigned int)offs, block, res);
 
-	switch ((int)res) {
-	case 0x00:
+	switch (res) {
+	case BBT_BLOCK_GOOD:
 		return 0;
-	case 0x01:
+	case BBT_BLOCK_WORN:
 		return 1;
-	case 0x02:
+	case BBT_BLOCK_RESERVED:
 		return allowbbt ? 0 : 1;
 	}
 	return 1;
 }
+
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+	struct nand_chip *this = mtd->priv;
+	int block, ret = 0;
+
+	block = (int)(offs >> this->bbt_erase_shift);
+
+	/* Mark bad block in memory */
+	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+	/* Update flash-based bad block table */
+	if (this->bbt_options & NAND_BBT_USE_FLASH)
+		ret = nand_update_bbt(mtd, offs);
+
+	return ret;
+}
+
+EXPORT_SYMBOL(nand_scan_bbt);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index f3f0cb6..2da8d08 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -8,165 +8,172 @@
  * published by the Free Software Foundation.
  *
  */
-
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#else
 #include <common.h>
 #include <linux/mtd/nand.h>
-/*
-*	Chip ID list
-*
-*	Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
-*	options
-*
-*	Pagesize; 0, 256, 512
-*	0	get this information from the extended chip ID
-+	256	256 Byte page size
-*	512	512 Byte page size
-*/
-const struct nand_flash_dev nand_flash_ids[] = {
-
-#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
-	{"NAND 1MiB 5V 8-bit",		0x6e, 256, 1, 0x1000, 0},
-	{"NAND 2MiB 5V 8-bit",		0x64, 256, 2, 0x1000, 0},
-	{"NAND 4MiB 5V 8-bit",		0x6b, 512, 4, 0x2000, 0},
-	{"NAND 1MiB 3,3V 8-bit",	0xe8, 256, 1, 0x1000, 0},
-	{"NAND 1MiB 3,3V 8-bit",	0xec, 256, 1, 0x1000, 0},
-	{"NAND 2MiB 3,3V 8-bit",	0xea, 256, 2, 0x1000, 0},
-	{"NAND 4MiB 3,3V 8-bit", 	0xd5, 512, 4, 0x2000, 0},
-	{"NAND 4MiB 3,3V 8-bit",	0xe3, 512, 4, 0x2000, 0},
-	{"NAND 4MiB 3,3V 8-bit",	0xe5, 512, 4, 0x2000, 0},
-	{"NAND 8MiB 3,3V 8-bit",	0xd6, 512, 8, 0x2000, 0},
-
-	{"NAND 8MiB 1,8V 8-bit",	0x39, 512, 8, 0x2000, 0},
-	{"NAND 8MiB 3,3V 8-bit",	0xe6, 512, 8, 0x2000, 0},
-	{"NAND 8MiB 1,8V 16-bit",	0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
-	{"NAND 8MiB 3,3V 16-bit",	0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
 #endif
+#include <linux/sizes.h>
 
-	{"NAND 16MiB 1,8V 8-bit",	0x33, 512, 16, 0x4000, 0},
-	{"NAND 16MiB 3,3V 8-bit",	0x73, 512, 16, 0x4000, 0},
-	{"NAND 16MiB 1,8V 16-bit",	0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
-	{"NAND 16MiB 3,3V 16-bit",	0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
-
-	{"NAND 32MiB 1,8V 8-bit",	0x35, 512, 32, 0x4000, 0},
-	{"NAND 32MiB 3,3V 8-bit",	0x75, 512, 32, 0x4000, 0},
-	{"NAND 32MiB 1,8V 16-bit",	0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
-	{"NAND 32MiB 3,3V 16-bit",	0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
-
-	{"NAND 64MiB 1,8V 8-bit",	0x36, 512, 64, 0x4000, 0},
-	{"NAND 64MiB 3,3V 8-bit",	0x76, 512, 64, 0x4000, 0},
-	{"NAND 64MiB 1,8V 16-bit",	0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
-	{"NAND 64MiB 3,3V 16-bit",	0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
-
-	{"NAND 128MiB 1,8V 8-bit",	0x78, 512, 128, 0x4000, 0},
-	{"NAND 128MiB 1,8V 8-bit",	0x39, 512, 128, 0x4000, 0},
-	{"NAND 128MiB 3,3V 8-bit",	0x79, 512, 128, 0x4000, 0},
-	{"NAND 128MiB 1,8V 16-bit",	0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-	{"NAND 128MiB 1,8V 16-bit",	0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-	{"NAND 128MiB 3,3V 16-bit",	0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-	{"NAND 128MiB 3,3V 16-bit",	0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-
-	{"NAND 256MiB 3,3V 8-bit",	0x71, 512, 256, 0x4000, 0},
-
-	/*
-	 * These are the new chips with large page size. The pagesize and the
-	 * erasesize is determined from the extended id bytes
-	 */
 #define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
 
-	/* 512 Megabit */
-	{"NAND 64MiB 1,8V 8-bit",	0xA2, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 1,8V 8-bit",	0xA0, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 3,3V 8-bit",	0xF2, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 3,3V 8-bit",	0xD0, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 3,3V 8-bit",	0xF0, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 1,8V 16-bit",	0xB2, 0,  64, 0, LP_OPTIONS16},
-	{"NAND 64MiB 1,8V 16-bit",	0xB0, 0,  64, 0, LP_OPTIONS16},
-	{"NAND 64MiB 3,3V 16-bit",	0xC2, 0,  64, 0, LP_OPTIONS16},
-	{"NAND 64MiB 3,3V 16-bit",	0xC0, 0,  64, 0, LP_OPTIONS16},
-
-	/* 1 Gigabit */
-	{"NAND 128MiB 1,8V 8-bit",	0xA1, 0, 128, 0, LP_OPTIONS},
-	{"NAND 128MiB 3,3V 8-bit",	0xF1, 0, 128, 0, LP_OPTIONS},
-	{"NAND 128MiB 3,3V 8-bit",	0xD1, 0, 128, 0, LP_OPTIONS},
-	{"NAND 128MiB 1,8V 16-bit",	0xB1, 0, 128, 0, LP_OPTIONS16},
-	{"NAND 128MiB 3,3V 16-bit",	0xC1, 0, 128, 0, LP_OPTIONS16},
-	{"NAND 128MiB 1,8V 16-bit",     0xAD, 0, 128, 0, LP_OPTIONS16},
-
-	/* 2 Gigabit */
-	{"NAND 256MiB 1,8V 8-bit",	0xAA, 0, 256, 0, LP_OPTIONS},
-	{"NAND 256MiB 3,3V 8-bit",	0xDA, 0, 256, 0, LP_OPTIONS},
-	{"NAND 256MiB 1,8V 16-bit",	0xBA, 0, 256, 0, LP_OPTIONS16},
-	{"NAND 256MiB 3,3V 16-bit",	0xCA, 0, 256, 0, LP_OPTIONS16},
-
-	/* 4 Gigabit */
-	{"NAND 512MiB 1,8V 8-bit",	0xAC, 0, 512, 0, LP_OPTIONS},
-	{"NAND 512MiB 3,3V 8-bit",	0xDC, 0, 512, 0, LP_OPTIONS},
-	{"NAND 512MiB 1,8V 16-bit",	0xBC, 0, 512, 0, LP_OPTIONS16},
-	{"NAND 512MiB 3,3V 16-bit",	0xCC, 0, 512, 0, LP_OPTIONS16},
-
-	/* 8 Gigabit */
-	{"NAND 1GiB 1,8V 8-bit",	0xA3, 0, 1024, 0, LP_OPTIONS},
-	{"NAND 1GiB 3,3V 8-bit",	0xD3, 0, 1024, 0, LP_OPTIONS},
-	{"NAND 1GiB 1,8V 16-bit",	0xB3, 0, 1024, 0, LP_OPTIONS16},
-	{"NAND 1GiB 3,3V 16-bit",	0xC3, 0, 1024, 0, LP_OPTIONS16},
-
-	/* 16 Gigabit */
-	{"NAND 2GiB 1,8V 8-bit",	0xA5, 0, 2048, 0, LP_OPTIONS},
-	{"NAND 2GiB 3,3V 8-bit",	0xD5, 0, 2048, 0, LP_OPTIONS},
-	{"NAND 2GiB 1,8V 16-bit",	0xB5, 0, 2048, 0, LP_OPTIONS16},
-	{"NAND 2GiB 3,3V 16-bit",	0xC5, 0, 2048, 0, LP_OPTIONS16},
-
-	/* 32 Gigabit */
-	{"NAND 4GiB 1,8V 8-bit",	0xA7, 0, 4096, 0, LP_OPTIONS},
-	{"NAND 4GiB 3,3V 8-bit",	0xD7, 0, 4096, 0, LP_OPTIONS},
-	{"NAND 4GiB 1,8V 16-bit",	0xB7, 0, 4096, 0, LP_OPTIONS16},
-	{"NAND 4GiB 3,3V 16-bit",	0xC7, 0, 4096, 0, LP_OPTIONS16},
-
-	/* 64 Gigabit */
-	{"NAND 8GiB 1,8V 8-bit",	0xAE, 0, 8192, 0, LP_OPTIONS},
-	{"NAND 8GiB 3,3V 8-bit",	0xDE, 0, 8192, 0, LP_OPTIONS},
-	{"NAND 8GiB 1,8V 16-bit",	0xBE, 0, 8192, 0, LP_OPTIONS16},
-	{"NAND 8GiB 3,3V 16-bit",	0xCE, 0, 8192, 0, LP_OPTIONS16},
-
-	/* 128 Gigabit */
-	{"NAND 16GiB 1,8V 8-bit",	0x1A, 0, 16384, 0, LP_OPTIONS},
-	{"NAND 16GiB 3,3V 8-bit",	0x3A, 0, 16384, 0, LP_OPTIONS},
-	{"NAND 16GiB 1,8V 16-bit",	0x2A, 0, 16384, 0, LP_OPTIONS16},
-	{"NAND 16GiB 3,3V 16-bit",	0x4A, 0, 16384, 0, LP_OPTIONS16},
-
-	/* 256 Gigabit */
-	{"NAND 32GiB 1,8V 8-bit",	0x1C, 0, 32768, 0, LP_OPTIONS},
-	{"NAND 32GiB 3,3V 8-bit",	0x3C, 0, 32768, 0, LP_OPTIONS},
-	{"NAND 32GiB 1,8V 16-bit",	0x2C, 0, 32768, 0, LP_OPTIONS16},
-	{"NAND 32GiB 3,3V 16-bit",	0x4C, 0, 32768, 0, LP_OPTIONS16},
-
-	/* 512 Gigabit */
-	{"NAND 64GiB 1,8V 8-bit",	0x1E, 0, 65536, 0, LP_OPTIONS},
-	{"NAND 64GiB 3,3V 8-bit",	0x3E, 0, 65536, 0, LP_OPTIONS},
-	{"NAND 64GiB 1,8V 16-bit",	0x2E, 0, 65536, 0, LP_OPTIONS16},
-	{"NAND 64GiB 3,3V 16-bit",	0x4E, 0, 65536, 0, LP_OPTIONS16},
-
-	/*
-	 * Renesas AND 1 Gigabit. Those chips do not support extended id and
-	 * have a strange page/block layout !  The chosen minimum erasesize is
-	 * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
-	 * planes 1 block = 2 pages, but due to plane arrangement the blocks
-	 * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
-	 * increase the eraseblock size so we chose a combined one which can be
-	 * erased in one go There are more speed improvements for reads and
-	 * writes possible, but not implemented now
-	 */
-	{"AND 128MiB 3,3V 8-bit",	0x01, 2048, 128, 0x4000,
-	 NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
-
-	{NULL,}
-};
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
 
 /*
-*	Manufacturer ID list
-*/
-const struct nand_manufacturers nand_manuf_ids[] = {
+ * The chip ID list:
+ *    name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
+	LEGACY_ID_NAND("NAND 1MiB 5V 8-bit",	0x6e, 1, SZ_4K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 2MiB 5V 8-bit",	0x64, 2, SZ_4K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 1MiB 3,3V 8-bit",	0xe8, 1, SZ_4K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 1MiB 3,3V 8-bit",	0xec, 1, SZ_4K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 2MiB 3,3V 8-bit",	0xea, 2, SZ_4K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit",	0xd5, 4, SZ_8K, SP_OPTIONS),
+
+	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit",	0xe6, 8, SZ_8K, SP_OPTIONS),
+#endif
+	/*
+	 * Some incompatible NAND chips share device IDs and so must be
+	 * listed by full ID. We list them first so that we can easily identify
+	 * the most specific match.
+	 */
+	{"TC58NVG2S0F 4G 3.3V 8-bit",
+		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+	{"TC58NVG3S0F 8G 3.3V 8-bit",
+		{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+		  SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+	{"TC58NVG5D2 32G 3.3V 8-bit",
+		{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+		  SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+	{"TC58NVG6D2 64G 3.3V 8-bit",
+		{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+		  SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+
+	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+	LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit",  0x33, 16, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit",  0x73, 16, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit",  0x35, 32, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit",  0x75, 32, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit",  0x36, 64, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit",  0x76, 64, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x78, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x39, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit",  0x79, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
+
+	/*
+	 * These are the new chips with large page size. Their page size and
+	 * eraseblock size are determined from the extended ID bytes.
+	 */
+
+	/* 512 Megabit */
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA2,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF2,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xD0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0,  64, LP_OPTIONS16),
+
+	/* 1 Gigabit */
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit",  0xA1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xF1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xD1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
+
+	/* 2 Gigabit */
+	EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit",  0xAA, 256, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit",  0xDA, 256, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
+
+	/* 4 Gigabit */
+	EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit",  0xAC, 512, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit",  0xDC, 512, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
+
+	/* 8 Gigabit */
+	EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit",  0xA3, 1024, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit",  0xD3, 1024, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
+
+	/* 16 Gigabit */
+	EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit",  0xA5, 2048, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit",  0xD5, 2048, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
+
+	/* 32 Gigabit */
+	EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit",  0xA7, 4096, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit",  0xD7, 4096, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
+
+	/* 64 Gigabit */
+	EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit",  0xAE, 8192, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit",  0xDE, 8192, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
+
+	/* 128 Gigabit */
+	EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit",  0x1A, 16384, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit",  0x3A, 16384, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
+
+	/* 256 Gigabit */
+	EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit",  0x1C, 32768, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit",  0x3C, 32768, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
+
+	/* 512 Gigabit */
+	EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit",  0x1E, 65536, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit",  0x3E, 65536, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
+
+	{NULL}
+};
+
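For context on the table conversion: LEGACY_ID_NAND() and
EXTENDED_ID_NAND() are initializer macros from the synced
<linux/mtd/nand.h>. A sketch of their approximate expansion (exact field
layout per the Linux 3.14 header):

/*
 * Legacy chips have a fixed 512-byte page and an explicit eraseblock
 * size; extended-ID chips leave pagesize/erasesize zero so both are
 * decoded from the extended ID bytes at detection time.
 */
#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)          \
	{ .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
	  .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }

#define EXTENDED_ID_NAND(nm, devid, chipsz, opts)                      \
	{ .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
	  .options = (opts) }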
+/* Manufacturer IDs */
+struct nand_manufacturers nand_manuf_ids[] = {
 	{NAND_MFR_TOSHIBA, "Toshiba"},
 	{NAND_MFR_SAMSUNG, "Samsung"},
 	{NAND_MFR_FUJITSU, "Fujitsu"},
@@ -178,5 +185,14 @@
 	{NAND_MFR_AMD, "AMD/Spansion"},
 	{NAND_MFR_MACRONIX, "Macronix"},
 	{NAND_MFR_EON, "Eon"},
+	{NAND_MFR_SANDISK, "SanDisk"},
+	{NAND_MFR_INTEL, "Intel"},
 	{0x0, "Unknown"}
 };
+
+EXPORT_SYMBOL(nand_manuf_ids);
+EXPORT_SYMBOL(nand_flash_ids);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/drivers/mtd/nand/nand_util.c b/drivers/mtd/nand/nand_util.c
index b292826..024f6fb 100644
--- a/drivers/mtd/nand/nand_util.c
+++ b/drivers/mtd/nand/nand_util.c
@@ -187,6 +187,9 @@
 
 #ifdef CONFIG_CMD_NAND_LOCK_UNLOCK
 
+#define NAND_CMD_LOCK_TIGHT     0x2c
+#define NAND_CMD_LOCK_STATUS    0x7a
+
 /******************************************************************************
  * Support for locking / unlocking operations of some NAND devices
  *****************************************************************************/
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 5510b13..2659595 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -118,6 +118,7 @@
 		out_be32((u32 *)(base + NDFC_DATA), *p++);
 }
 
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 static int ndfc_verify_buf(struct mtd_info *mtdinfo, const uint8_t *buf, int len)
 {
 	struct nand_chip *this = mtdinfo->priv;
@@ -130,6 +131,7 @@
 
 	return 0;
 }
+#endif
 
 /*
  * Read a byte from the NDFC.
@@ -205,7 +207,9 @@
 #endif
 
 	nand->write_buf  = ndfc_write_buf;
+#if defined(CONFIG_MTD_NAND_VERIFY_WRITE)
 	nand->verify_buf = ndfc_verify_buf;
+#endif
 	nand->read_byte = ndfc_read_byte;
 
 	chip++;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index e33e8d3..03deabc 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -22,6 +22,7 @@
 #include <common.h>
 #include <linux/compat.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
 #include <linux/mtd/onenand.h>
 
 #include <asm/io.h>
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 0267c2c..52509f1 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -140,7 +140,6 @@
 {
 	unsigned char data_buf[MAX_ONENAND_PAGESIZE];
 
-	bd->options &= ~NAND_BBT_SCANEMPTY;
 	return create_bbt(mtd, data_buf, bd, -1);
 }
 
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index df04c2b..5e56a29 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -15,20 +15,12 @@
 #include <linux/compat.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/onenand.h>
+#include <linux/mtd/flashchip.h>
 #include <linux/mtd/samsung_onenand.h>
 
 #include <asm/io.h>
 #include <asm/errno.h>
 
-#ifdef ONENAND_DEBUG
-#define DPRINTK(format, args...)					\
-do {									\
-	printf("%s[%d]: " format "\n", __func__, __LINE__, ##args);	\
-} while (0)
-#else
-#define DPRINTK(...)			do { } while (0)
-#endif
-
 #define ONENAND_ERASE_STATUS		0x00
 #define ONENAND_MULTI_ERASE_SET		0x01
 #define ONENAND_ERASE_START		0x03
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 56c2823..4807f94 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -5,6 +5,7 @@
 # SPDX-License-Identifier:	GPL-2.0+
 #
 
-obj-y += build.o vtbl.o vmt.o upd.o kapi.o eba.o io.o wl.o scan.o crc32.o
+obj-y += attach.o build.o vtbl.o vmt.o upd.o kapi.o eba.o io.o wl.o crc32.o
+obj-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
 obj-y += misc.o
 obj-y += debug.o
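With fastmap.o built only when CONFIG_MTD_UBI_FASTMAP is set, a board opts
in to the new Fastmap support through its configuration header; a minimal
sketch (the board header name is illustrative):

/* include/configs/<board>.h: enable UBI Fastmap attach support */
#define CONFIG_MTD_UBI_FASTMAP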
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
new file mode 100644
index 0000000..9fce02e
--- /dev/null
+++ b/drivers/mtd/ubi/attach.c
@@ -0,0 +1,1754 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI attaching sub-system.
+ *
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
+ *
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
+ * objects which are kept in volume RB-tree with root at the @volumes field.
+ * The RB-tree is indexed by the volume ID.
+ *
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of an RB-tree of
+ * per-LEB objects.
+ *
+ * Corrupted physical eraseblocks are put to the @corr list, free physical
+ * eraseblocks are put to the @free list and the physical eraseblocks to be
+ * erased are put to the @erase list.
+ *
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
+ * UBI tries to distinguish between 2 types of corruptions.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
+ *
+ * 2. Unexpected corruptions which are not caused by power cuts. During
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this reduces the number of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
+ *
+ * However, it is difficult to reliably distinguish between these types of
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area.  Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ *   o If the data area contains only 0xFFs, there are no data, and it is safe
+ *     to just erase this PEB - this is corruption type 1.
+ *   o If the data area has bit-flips or data integrity errors (ECC errors on
+ *     NAND), it is probably a PEB which was being erased when a power cut
+ *     happened, so this is corruption type 1. However, this is just a guess,
+ *     which might be wrong.
+ *   o Otherwise this is corruption type 2.
+ */
+
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#else
+#include <div64.h>
+#include <linux/err.h>
+#endif
+
+#include <linux/math64.h>
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* Temporary variables used during scanning */
+static struct ubi_ec_hdr *ech;
+static struct ubi_vid_hdr *vidh;
+
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
+ * @ec: erase counter of the physical eraseblock
+ * @to_head: if not zero, add to the head of the list
+ * @list: the list to add to
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
+ * If @to_head is not zero, PEB will be added to the head of the list, which
+ * basically means it will be processed first later. E.g., we add corrupted
+ * PEBs (corrupted due to power cuts) to the head of the erase list to make
+ * sure we erase them first and get rid of corruptions ASAP. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+		       int lnum, int ec, int to_head, struct list_head *list)
+{
+	struct ubi_ainf_peb *aeb;
+
+	if (list == &ai->free) {
+		dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
+	} else if (list == &ai->erase) {
+		dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
+	} else if (list == &ai->alien) {
+		dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
+		ai->alien_peb_count += 1;
+	} else
+		BUG();
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	aeb->pnum = pnum;
+	aeb->vol_id = vol_id;
+	aeb->lnum = lnum;
+	aeb->ec = ec;
+	if (to_head)
+		list_add(&aeb->u.list, list);
+	else
+		list_add_tail(&aeb->u.list, list);
+	return 0;
+}
+
+/**
+ * add_corrupted - add a corrupted physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list.  The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
+{
+	struct ubi_ainf_peb *aeb;
+
+	dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	ai->corr_peb_count += 1;
+	aeb->pnum = pnum;
+	aeb->ec = ec;
+	list_add(&aeb->u.list, &ai->corr);
+	return 0;
+}
+
+/**
+ * validate_vid_hdr - check volume identifier header.
+ * @vid_hdr: the volume identifier header to check
+ * @av: information about the volume this logical eraseblock belongs to
+ * @pnum: physical eraseblock number the VID header came from
+ *
+ * This function checks that data stored in @vid_hdr is consistent. Returns
+ * non-zero if an inconsistency was found and zero if not.
+ *
+ * Note, UBI sanity-checks everything it reads from the flash media.
+ * Most of the checks are done in the I/O sub-system. Here we check that the
+ * information in the VID header is consistent with the information in other
+ * VID headers of the same volume.
+ */
+static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
+			    const struct ubi_ainf_volume *av, int pnum)
+{
+	int vol_type = vid_hdr->vol_type;
+	int vol_id = be32_to_cpu(vid_hdr->vol_id);
+	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+	int data_pad = be32_to_cpu(vid_hdr->data_pad);
+
+	if (av->leb_count != 0) {
+		int av_vol_type;
+
+		/*
+		 * This is not the first logical eraseblock belonging to this
+		 * volume. Ensure that the data in its VID header is consistent
+		 * with the data in previous logical eraseblock headers.
+		 */
+
+		if (vol_id != av->vol_id) {
+			ubi_err("inconsistent vol_id");
+			goto bad;
+		}
+
+		if (av->vol_type == UBI_STATIC_VOLUME)
+			av_vol_type = UBI_VID_STATIC;
+		else
+			av_vol_type = UBI_VID_DYNAMIC;
+
+		if (vol_type != av_vol_type) {
+			ubi_err("inconsistent vol_type");
+			goto bad;
+		}
+
+		if (used_ebs != av->used_ebs) {
+			ubi_err("inconsistent used_ebs");
+			goto bad;
+		}
+
+		if (data_pad != av->data_pad) {
+			ubi_err("inconsistent data_pad");
+			goto bad;
+		}
+	}
+
+	return 0;
+
+bad:
+	ubi_err("inconsistent VID header at PEB %d", pnum);
+	ubi_dump_vid_hdr(vid_hdr);
+	ubi_dump_av(av);
+	return -EINVAL;
+}
+
+/**
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: ID of the volume to add
+ * @pnum: physical eraseblock number
+ * @vid_hdr: volume identifier header
+ *
+ * If the volume corresponding to the @vid_hdr logical eraseblock is already
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
+ */
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+					  int vol_id, int pnum,
+					  const struct ubi_vid_hdr *vid_hdr)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
+
+	/* Walk the volume RB-tree to look if this volume is already present */
+	while (*p) {
+		parent = *p;
+		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (vol_id == av->vol_id)
+			return av;
+
+		if (vol_id > av->vol_id)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	/* The volume is absent - add it */
+	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+	if (!av)
+		return ERR_PTR(-ENOMEM);
+
+	av->highest_lnum = av->leb_count = 0;
+	av->vol_id = vol_id;
+	av->root = RB_ROOT;
+	av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+	av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+	av->compat = vid_hdr->compat;
+	av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+							    : UBI_STATIC_VOLUME;
+	if (vol_id > ai->highest_vol_id)
+		ai->highest_vol_id = vol_id;
+
+	rb_link_node(&av->rb, parent, p);
+	rb_insert_color(&av->rb, &ai->volumes);
+	ai->vols_found += 1;
+	dbg_bld("added volume %d", vol_id);
+	return av;
+}
+
+/**
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
+ * @ubi: UBI device description object
+ * @aeb: first logical eraseblock to compare
+ * @pnum: physical eraseblock number of the second logical eraseblock to
+ * compare
+ * @vid_hdr: volume identifier header of the second logical eraseblock
+ *
+ * This function compares 2 copies of a LEB and informs which one is newer. In
+ * case of success this function returns a positive value, in case of failure, a
+ * negative error code is returned. The success return codes use the following
+ * bits:
+ *     o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
+ *       second PEB (described by @pnum and @vid_hdr);
+ *     o bit 0 is set: the second PEB is newer;
+ *     o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ *     o bit 1 is set: bit-flips were detected in the newer LEB;
+ *     o bit 2 is cleared: the older LEB is not corrupted;
+ *     o bit 2 is set: the older LEB is corrupted.
+ */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+			int pnum, const struct ubi_vid_hdr *vid_hdr)
+{
+	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+	uint32_t data_crc, crc;
+	struct ubi_vid_hdr *vh = NULL;
+	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
+
+	if (sqnum2 == aeb->sqnum) {
+		/*
+		 * This must be a really ancient UBI image which has been
+		 * created before sequence number support was added. At
+		 * that time we used 32-bit LEB versions stored in logical
+		 * eraseblocks. That was before UBI got into mainline. We do not
+		 * support these images anymore. Well, those images still work,
+		 * but only if no unclean reboots happened.
+		 */
+		ubi_err("unsupported on-flash UBI format");
+		return -EINVAL;
+	}
+
+	/* Obviously the LEB with lower sequence counter is older */
+	second_is_newer = (sqnum2 > aeb->sqnum);
+
+	/*
+	 * Now we know which copy is newer. If the copy flag of the PEB with
+	 * newer version is not set, then we just return, otherwise we have to
+	 * check data CRC. For the second PEB we already have the VID header,
+	 * for the first one - we'll need to re-read it from flash.
+	 *
+	 * Note: this may be optimized so that we wouldn't read twice.
+	 */
+
+	if (second_is_newer) {
+		if (!vid_hdr->copy_flag) {
+			/* It is not a copy, so it is newer */
+			dbg_bld("second PEB %d is newer, copy_flag is unset",
+				pnum);
+			return 1;
+		}
+	} else {
+		if (!aeb->copy_flag) {
+			/* It is not a copy, so it is newer */
+			dbg_bld("first PEB %d is newer, copy_flag is unset",
+				pnum);
+			return bitflips << 1;
+		}
+
+		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+		if (!vh)
+			return -ENOMEM;
+
+		pnum = aeb->pnum;
+		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		if (err) {
+			if (err == UBI_IO_BITFLIPS)
+				bitflips = 1;
+			else {
+				ubi_err("VID header of PEB %d is bad, but it was OK earlier, err %d",
+					pnum, err);
+				if (err > 0)
+					err = -EIO;
+
+				goto out_free_vidh;
+			}
+		}
+
+		vid_hdr = vh;
+	}
+
+	/* Read the data of the copy and check the CRC */
+
+	len = be32_to_cpu(vid_hdr->data_size);
+
+	mutex_lock(&ubi->buf_mutex);
+	err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
+	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+		goto out_unlock;
+
+	data_crc = be32_to_cpu(vid_hdr->data_crc);
+	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
+	if (crc != data_crc) {
+		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
+			pnum, crc, data_crc);
+		corrupted = 1;
+		bitflips = 0;
+		second_is_newer = !second_is_newer;
+	} else {
+		dbg_bld("PEB %d CRC is OK", pnum);
+		bitflips = !!err;
+	}
+	mutex_unlock(&ubi->buf_mutex);
+
+	ubi_free_vid_hdr(ubi, vh);
+
+	if (second_is_newer)
+		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
+	else
+		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
+
+	return second_is_newer | (bitflips << 1) | (corrupted << 2);
+
+out_unlock:
+	mutex_unlock(&ubi->buf_mutex);
+out_free_vidh:
+	ubi_free_vid_hdr(ubi, vh);
+	return err;
+}
+
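The bit-encoded return value documented above is meant to be consumed with
simple mask tests, as ubi_add_to_av() does further down in this file. In
sketch form (variable names illustrative):

/* decoding a non-negative ubi_compare_lebs() result cmp_res */
int second_is_newer = cmp_res & 1; /* bit 0: the second PEB holds newer data */
int has_bitflips    = cmp_res & 2; /* bit 1: the newer copy needs scrubbing */
int older_corrupted = cmp_res & 4; /* bit 2: erase the older copy ASAP */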
+/**
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @ec: erase counter
+ * @vid_hdr: the volume identifier header
+ * @bitflips: if bit-flips were detected when this physical eraseblock was read
+ *
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
+{
+	int err, vol_id, lnum;
+	unsigned long long sqnum;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb;
+	struct rb_node **p, *parent = NULL;
+
+	vol_id = be32_to_cpu(vid_hdr->vol_id);
+	lnum = be32_to_cpu(vid_hdr->lnum);
+	sqnum = be64_to_cpu(vid_hdr->sqnum);
+
+	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
+		pnum, vol_id, lnum, ec, sqnum, bitflips);
+
+	av = add_volume(ai, vol_id, pnum, vid_hdr);
+	if (IS_ERR(av))
+		return PTR_ERR(av);
+
+	if (ai->max_sqnum < sqnum)
+		ai->max_sqnum = sqnum;
+
+	/*
+	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
+	 * if this is the first instance of this logical eraseblock or not.
+	 */
+	p = &av->root.rb_node;
+	while (*p) {
+		int cmp_res;
+
+		parent = *p;
+		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+		if (lnum != aeb->lnum) {
+			if (lnum < aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+			continue;
+		}
+
+		/*
+		 * There is already a physical eraseblock describing the same
+		 * logical eraseblock present.
+		 */
+
+		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+			aeb->pnum, aeb->sqnum, aeb->ec);
+
+		/*
+		 * Make sure that the logical eraseblocks have different
+		 * sequence numbers. Otherwise the image is bad.
+		 *
+		 * However, if the sequence number is zero, we assume it must
+		 * be an ancient UBI image from the era when UBI did not have
+		 * sequence numbers. We still can attach these images, unless
+		 * there is a need to distinguish between old and new
+		 * eraseblocks, in which case we'll refuse the image in
+		 * 'ubi_compare_lebs()'. In other words, we attach old clean
+		 * images, but refuse attaching old images with duplicated
+		 * logical eraseblocks because there was an unclean reboot.
+		 */
+		if (aeb->sqnum == sqnum && sqnum != 0) {
+			ubi_err("two LEBs with same sequence number %llu",
+				sqnum);
+			ubi_dump_aeb(aeb, 0);
+			ubi_dump_vid_hdr(vid_hdr);
+			return -EINVAL;
+		}
+
+		/*
+		 * Now we have to drop the older one and preserve the newer
+		 * one.
+		 */
+		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
+		if (cmp_res < 0)
+			return cmp_res;
+
+		if (cmp_res & 1) {
+			/*
+			 * This logical eraseblock is newer than the one
+			 * found earlier.
+			 */
+			err = validate_vid_hdr(vid_hdr, av, pnum);
+			if (err)
+				return err;
+
+			err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+					  aeb->lnum, aeb->ec, cmp_res & 4,
+					  &ai->erase);
+			if (err)
+				return err;
+
+			aeb->ec = ec;
+			aeb->pnum = pnum;
+			aeb->vol_id = vol_id;
+			aeb->lnum = lnum;
+			aeb->scrub = ((cmp_res & 2) || bitflips);
+			aeb->copy_flag = vid_hdr->copy_flag;
+			aeb->sqnum = sqnum;
+
+			if (av->highest_lnum == lnum)
+				av->last_data_size =
+					be32_to_cpu(vid_hdr->data_size);
+
+			return 0;
+		} else {
+			/*
+			 * This logical eraseblock is older than the one found
+			 * previously.
+			 */
+			return add_to_list(ai, pnum, vol_id, lnum, ec,
+					   cmp_res & 4, &ai->erase);
+		}
+	}
+
+	/*
+	 * We've met this logical eraseblock for the first time, add it to the
+	 * attaching information.
+	 */
+
+	err = validate_vid_hdr(vid_hdr, av, pnum);
+	if (err)
+		return err;
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	aeb->ec = ec;
+	aeb->pnum = pnum;
+	aeb->vol_id = vol_id;
+	aeb->lnum = lnum;
+	aeb->scrub = bitflips;
+	aeb->copy_flag = vid_hdr->copy_flag;
+	aeb->sqnum = sqnum;
+
+	if (av->highest_lnum <= lnum) {
+		av->highest_lnum = lnum;
+		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
+	}
+
+	av->leb_count += 1;
+	rb_link_node(&aeb->u.rb, parent, p);
+	rb_insert_color(&aeb->u.rb, &av->root);
+	return 0;
+}
+
+/**
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the volume description or %NULL if there
+ * are no data about this volume in the attaching information.
+ */
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+				    int vol_id)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node *p = ai->volumes.rb_node;
+
+	while (p) {
+		av = rb_entry(p, struct ubi_ainf_volume, rb);
+
+		if (vol_id == av->vol_id)
+			return av;
+
+		if (vol_id > av->vol_id)
+			p = p->rb_left;
+		else
+			p = p->rb_right;
+	}
+
+	return NULL;
+}
+
+/**
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
+ */
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+	struct rb_node *rb;
+	struct ubi_ainf_peb *aeb;
+
+	dbg_bld("remove attaching information about volume %d", av->vol_id);
+
+	while ((rb = rb_first(&av->root))) {
+		aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+		rb_erase(&aeb->u.rb, &av->root);
+		list_add_tail(&aeb->u.list, &ai->erase);
+	}
+
+	rb_erase(&av->rb, &ai->volumes);
+	kfree(av);
+	ai->vols_found -= 1;
+}
+
+/**
+ * early_erase_peb - erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to erase;
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
+ *
+ * This function erases physical eraseblock 'pnum', and writes the erase
+ * counter header to it. This function should only be used during the UBI
+ * device initialization stages, when the EBA sub-system has not yet been
+ * initialized.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int early_erase_peb(struct ubi_device *ubi,
+			   const struct ubi_attach_info *ai, int pnum, int ec)
+{
+	int err;
+	struct ubi_ec_hdr *ec_hdr;
+
+	if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
+		/*
+		 * Erase counter overflow. Upgrade UBI and use 64-bit
+		 * erase counters internally.
+		 */
+		ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
+		return -EINVAL;
+	}
+
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ec_hdr)
+		return -ENOMEM;
+
+	ec_hdr->ec = cpu_to_be64(ec);
+
+	err = ubi_io_sync_erase(ubi, pnum, 0);
+	if (err < 0)
+		goto out_free;
+
+	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+
+out_free:
+	kfree(ec_hdr);
+	return err;
+}
+
+/**
+ * ubi_early_get_peb - get a free physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns a free physical eraseblock. It is supposed to be
+ * called during the UBI initialization stages, when the wear-leveling
+ * sub-system is not initialized yet. This function picks a physical
+ * eraseblock from one of the lists, writes the EC header if needed, and
+ * removes it from the list.
+ *
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
+ */
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+				       struct ubi_attach_info *ai)
+{
+	int err = 0;
+	struct ubi_ainf_peb *aeb, *tmp_aeb;
+
+	if (!list_empty(&ai->free)) {
+		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+		list_del(&aeb->u.list);
+		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+		return aeb;
+	}
+
+	/*
+	 * We try to erase the first physical eraseblock from the erase list
+	 * and pick it if we succeed, or try to erase the next one if not. And
+	 * so forth. We don't want to deal with bad eraseblocks here -
+	 * they'll be handled later.
+	 */
+	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+
+		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
+		if (err)
+			continue;
+
+		aeb->ec += 1;
+		list_del(&aeb->u.list);
+		dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+		return aeb;
+	}
+
+	ubi_err("no free eraseblocks");
+	return ERR_PTR(-ENOSPC);
+}
+
+/**
+ * check_corruption - check the data area of PEB.
+ * @ubi: UBI device description object
+ * @vid_hdr: the (corrupted) VID header of this PEB
+ * @pnum: the physical eraseblock number to check
+ *
+ * This is a helper function which is used to distinguish between VID header
+ * corruptions caused by power cuts and other reasons. If the PEB contains only
+ * 0xFF bytes in the data area, the VID header is most probably corrupted
+ * because of a power cut (%0 is returned in this case). Otherwise, it was
+ * probably corrupted for some other reasons (%1 is returned in this case). A
+ * negative error code is returned if a read error occurred.
+ *
+ * If the corruption reason was a power cut, UBI can safely erase this PEB.
+ * Otherwise, it should preserve it to avoid possibly destroying important
+ * information.
+ */
+static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
+			    int pnum)
+{
+	int err;
+
+	mutex_lock(&ubi->buf_mutex);
+	memset(ubi->peb_buf, 0x00, ubi->leb_size);
+
+	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
+			  ubi->leb_size);
+	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+		/*
+		 * Bit-flips or integrity errors while reading the data area.
+		 * It is difficult to say for sure what type of corruption is
+		 * this, but presumably a power cut happened while this PEB was
+		 * being erased, so it became unstable and corrupted, and
+		 * should be erased.
+		 */
+		err = 0;
+		goto out_unlock;
+	}
+
+	if (err)
+		goto out_unlock;
+
+	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
+		goto out_unlock;
+
+	ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+		pnum);
+	ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+	ubi_dump_vid_hdr(vid_hdr);
+	pr_err("hexdump of PEB %d offset %d, length %d",
+	       pnum, ubi->leb_start, ubi->leb_size);
+	ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+			       ubi->peb_buf, ubi->leb_size, 1);
+	err = 1;
+
+out_unlock:
+	mutex_unlock(&ubi->buf_mutex);
+	return err;
+}
+
+/**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ */
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		    int pnum, int *vid, unsigned long long *sqnum)
+{
+	long long uninitialized_var(ec);
+	int err, bitflips = 0, vol_id = -1, ec_err = 0;
+
+	dbg_bld("scan PEB %d", pnum);
+
+	/* Skip bad physical eraseblocks */
+	err = ubi_io_is_bad(ubi, pnum);
+	if (err < 0)
+		return err;
+	else if (err) {
+		ai->bad_peb_count += 1;
+		return 0;
+	}
+
+	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+	if (err < 0)
+		return err;
+	switch (err) {
+	case 0:
+		break;
+	case UBI_IO_BITFLIPS:
+		bitflips = 1;
+		break;
+	case UBI_IO_FF:
+		ai->empty_peb_count += 1;
+		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+				   UBI_UNKNOWN, 0, &ai->erase);
+	case UBI_IO_FF_BITFLIPS:
+		ai->empty_peb_count += 1;
+		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+				   UBI_UNKNOWN, 1, &ai->erase);
+	case UBI_IO_BAD_HDR_EBADMSG:
+	case UBI_IO_BAD_HDR:
+		/*
+		 * We have to also look at the VID header, possibly it is not
+		 * corrupted. Set %bitflips flag in order to make this PEB be
+		 * moved and EC be re-created.
+		 */
+		ec_err = err;
+		ec = UBI_UNKNOWN;
+		bitflips = 1;
+		break;
+	default:
+		ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
+		return -EINVAL;
+	}
+
+	if (!ec_err) {
+		int image_seq;
+
+		/* Make sure UBI version is OK */
+		if (ech->version != UBI_VERSION) {
+			ubi_err("this UBI version is %d, image version is %d",
+				UBI_VERSION, (int)ech->version);
+			return -EINVAL;
+		}
+
+		ec = be64_to_cpu(ech->ec);
+		if (ec > UBI_MAX_ERASECOUNTER) {
+			/*
+			 * Erase counter overflow. The EC headers have 64 bits
+			 * reserved, but we anyway make use of only 31 bit
+			 * values, as this seems to be enough for any existing
+			 * flash. Upgrade UBI and use 64-bit erase counters
+			 * internally.
+			 */
+			ubi_err("erase counter overflow, max is %d",
+				UBI_MAX_ERASECOUNTER);
+			ubi_dump_ec_hdr(ech);
+			return -EINVAL;
+		}
+
+		/*
+		 * Make sure that all PEBs have the same image sequence number.
+		 * This allows us to detect situations when users flash UBI
+		 * images incorrectly, so that the flash has the new UBI image
+		 * and leftovers from the old one. This feature was added
+		 * relatively recently, and the sequence number was always
+		 * zero, because old UBI implementations always set it to zero.
+		 * For this reasons, we do not panic if some PEBs have zero
+		 * sequence number, while other PEBs have non-zero sequence
+		 * number.
+		 */
+		image_seq = be32_to_cpu(ech->image_seq);
+		if (!ubi->image_seq)
+			ubi->image_seq = image_seq;
+		if (image_seq && ubi->image_seq != image_seq) {
+			ubi_err("bad image sequence number %d in PEB %d, expected %d",
+				image_seq, pnum, ubi->image_seq);
+			ubi_dump_ec_hdr(ech);
+			return -EINVAL;
+		}
+	}
+
+	/* OK, we're done with the EC header; let's look at the VID header */
+
+	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+	if (err < 0)
+		return err;
+	switch (err) {
+	case 0:
+		break;
+	case UBI_IO_BITFLIPS:
+		bitflips = 1;
+		break;
+	case UBI_IO_BAD_HDR_EBADMSG:
+		if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
+			/*
+			 * Both EC and VID headers are corrupted and were read
+			 * with a data integrity error; probably this is a bad
+			 * PEB, but it is not marked as bad yet. This may also
+			 * be a result of power cut during erasure.
+			 */
+			ai->maybe_bad_peb_count += 1;
+			/* fall through */
+	case UBI_IO_BAD_HDR:
+		if (ec_err)
+			/*
+			 * Both headers are corrupted. There is a possibility
+			 * that this is a valid UBI PEB which has a corresponding
+			 * LEB, but the headers are corrupted. However, it is
+			 * impossible to distinguish it from a PEB which just
+			 * contains garbage because of a power cut during erase
+			 * operation. So we just schedule this PEB for erasure.
+			 *
+			 * Besides, in case of NOR flash, we deliberately
+			 * corrupt both headers because NOR flash erasure is
+			 * slow and can start from the end.
+			 */
+			err = 0;
+		else
+			/*
+			 * The EC was OK, but the VID header is corrupted. We
+			 * have to check what is in the data area.
+			 */
+			err = check_corruption(ubi, vidh, pnum);
+
+		if (err < 0)
+			return err;
+		else if (!err)
+			/* This corruption is caused by a power cut */
+			err = add_to_list(ai, pnum, UBI_UNKNOWN,
+					  UBI_UNKNOWN, ec, 1, &ai->erase);
+		else
+			/* This is an unexpected corruption */
+			err = add_corrupted(ai, pnum, ec);
+		if (err)
+			return err;
+		goto adjust_mean_ec;
+	case UBI_IO_FF_BITFLIPS:
+		err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+				  ec, 1, &ai->erase);
+		if (err)
+			return err;
+		goto adjust_mean_ec;
+	case UBI_IO_FF:
+		if (ec_err || bitflips)
+			err = add_to_list(ai, pnum, UBI_UNKNOWN,
+					  UBI_UNKNOWN, ec, 1, &ai->erase);
+		else
+			err = add_to_list(ai, pnum, UBI_UNKNOWN,
+					  UBI_UNKNOWN, ec, 0, &ai->free);
+		if (err)
+			return err;
+		goto adjust_mean_ec;
+	default:
+		ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
+			err);
+		return -EINVAL;
+	}
+
+	vol_id = be32_to_cpu(vidh->vol_id);
+	if (vid)
+		*vid = vol_id;
+	if (sqnum)
+		*sqnum = be64_to_cpu(vidh->sqnum);
+	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+		int lnum = be32_to_cpu(vidh->lnum);
+
+		/* Unsupported internal volume */
+		switch (vidh->compat) {
+		case UBI_COMPAT_DELETE:
+			if (vol_id != UBI_FM_SB_VOLUME_ID
+			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
+				ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+					vol_id, lnum);
+			}
+			err = add_to_list(ai, pnum, vol_id, lnum,
+					  ec, 1, &ai->erase);
+			if (err)
+				return err;
+			return 0;
+
+		case UBI_COMPAT_RO:
+			ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
+				vol_id, lnum);
+			ubi->ro_mode = 1;
+			break;
+
+		case UBI_COMPAT_PRESERVE:
+			ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+				vol_id, lnum);
+			err = add_to_list(ai, pnum, vol_id, lnum,
+					  ec, 0, &ai->alien);
+			if (err)
+				return err;
+			return 0;
+
+		case UBI_COMPAT_REJECT:
+			ubi_err("incompatible internal volume %d:%d found",
+				vol_id, lnum);
+			return -EINVAL;
+		}
+	}
+
+	if (ec_err)
+		ubi_warn("valid VID header but corrupted EC header at PEB %d",
+			 pnum);
+	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+	if (err)
+		return err;
+
+adjust_mean_ec:
+	if (!ec_err) {
+		ai->ec_sum += ec;
+		ai->ec_count += 1;
+		if (ec > ai->max_ec)
+			ai->max_ec = ec;
+		if (ec < ai->min_ec)
+			ai->min_ec = ec;
+	}
+
+	return 0;
+}
+
+/**
+ * late_analysis - analyze the overall situation with PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This is a helper function which takes a look at what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted, or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
+ */
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+	struct ubi_ainf_peb *aeb;
+	int max_corr, peb_count;
+
+	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
+	max_corr = peb_count / 20 ?: 8;
+
+	/*
+	 * A few corrupted PEBs are not a problem and may just be a result of
+	 * unclean reboots. However, many of them may indicate some problems
+	 * with the flash HW or driver.
+	 */
+	if (ai->corr_peb_count) {
+		ubi_err("%d PEBs are corrupted and preserved",
+			ai->corr_peb_count);
+		pr_err("Corrupted PEBs are:");
+		list_for_each_entry(aeb, &ai->corr, u.list)
+			pr_cont(" %d", aeb->pnum);
+		pr_cont("\n");
+
+		/*
+		 * If too many PEBs are corrupted, we refuse attaching,
+		 * otherwise, only print a warning.
+		 */
+		if (ai->corr_peb_count >= max_corr) {
+			ubi_err("too many corrupted PEBs, refusing");
+			return -EINVAL;
+		}
+	}
+
+	if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
+		/*
+		 * All PEBs are empty, or almost all - a couple of PEBs look
+		 * like they may be bad PEBs which were not marked as bad yet.
+		 *
+		 * This piece of code basically tries to distinguish between
+		 * the following situations:
+		 *
+		 * 1. Flash is empty, but there are a few bad PEBs, which are not
+		 *    marked as bad so far, and which were read with error. We
+		 *    want to go ahead and format this flash. While formatting,
+		 *    the faulty PEBs will probably be marked as bad.
+		 *
+		 * 2. Flash contains non-UBI data and we do not want to format
+		 *    it and destroy possibly important information.
+		 */
+		if (ai->maybe_bad_peb_count <= 2) {
+			ai->is_empty = 1;
+			ubi_msg("empty MTD device detected");
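+			/* Fresh image: seed a random image sequence number */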
+			get_random_bytes(&ubi->image_seq,
+					 sizeof(ubi->image_seq));
+		} else {
+			ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+			return -EINVAL;
+		}
+
+	}
+
+	return 0;
+}
+
+/**
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+	struct ubi_ainf_peb *aeb;
+	struct rb_node *this = av->root.rb_node;
+
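+	/*
+	 * Free the tree iteratively: walk down to a leaf, free it and
+	 * clear the parent's pointer to it, so that no recursion (and no
+	 * deep stack usage) is needed.
+	 */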
+	while (this) {
+		if (this->rb_left)
+			this = this->rb_left;
+		else if (this->rb_right)
+			this = this->rb_right;
+		else {
+			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+			this = rb_parent(this);
+			if (this) {
+				if (this->rb_left == &aeb->u.rb)
+					this->rb_left = NULL;
+				else
+					this->rb_right = NULL;
+			}
+
+			kmem_cache_free(ai->aeb_slab_cache, aeb);
+		}
+	}
+	kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+	struct ubi_ainf_peb *aeb, *aeb_tmp;
+	struct ubi_ainf_volume *av;
+	struct rb_node *rb;
+
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+
+	/* Destroy the volume RB-tree */
+	rb = ai->volumes.rb_node;
+	while (rb) {
+		if (rb->rb_left)
+			rb = rb->rb_left;
+		else if (rb->rb_right)
+			rb = rb->rb_right;
+		else {
+			av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+			rb = rb_parent(rb);
+			if (rb) {
+				if (rb->rb_left == &av->rb)
+					rb->rb_left = NULL;
+				else
+					rb->rb_right = NULL;
+			}
+
+			destroy_av(ai, av);
+		}
+	}
+
+	if (ai->aeb_slab_cache)
+		kmem_cache_destroy(ai->aeb_slab_cache);
+
+	kfree(ai);
+}
+
+/**
+ * scan_all - scan entire MTD device.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
+ *
+ * This function does full scanning of an MTD device and returns complete
+ * information about it in the form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
+ */
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		    int start)
+{
+	int err, pnum;
+	struct rb_node *rb1, *rb2;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb;
+
+	err = -ENOMEM;
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		return err;
+
+	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vidh)
+		goto out_ech;
+
+	for (pnum = start; pnum < ubi->peb_count; pnum++) {
+		cond_resched();
+
+		dbg_gen("process PEB %d", pnum);
+		err = scan_peb(ubi, ai, pnum, NULL, NULL);
+		if (err < 0)
+			goto out_vidh;
+	}
+
+	ubi_msg("scanning is finished");
+
+	/* Calculate mean erase counter */
+	if (ai->ec_count)
+		ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+
+	err = late_analysis(ubi, ai);
+	if (err)
+		goto out_vidh;
+
+	/*
+	 * In case of unknown erase counter we use the mean erase counter
+	 * value.
+	 */
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+			if (aeb->ec == UBI_UNKNOWN)
+				aeb->ec = ai->mean_ec;
+	}
+
+	list_for_each_entry(aeb, &ai->free, u.list) {
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+	}
+
+	list_for_each_entry(aeb, &ai->corr, u.list)
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+
+	list_for_each_entry(aeb, &ai->erase, u.list)
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+
+	err = self_check_ai(ubi, ai);
+	if (err)
+		goto out_vidh;
+
+	ubi_free_vid_hdr(ubi, vidh);
+	kfree(ech);
+
+	return 0;
+
+out_vidh:
+	ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+	kfree(ech);
+	return err;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Returns 0 on success; negative return values indicate an internal
+ * error.
+ * UBI_NO_FASTMAP denotes that no fastmap was found.
+ * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+	int err, pnum, fm_anchor = -1;
+	unsigned long long max_sqnum = 0;
+
+	err = -ENOMEM;
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		goto out;
+
+	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vidh)
+		goto out_ech;
+
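+	/*
+	 * A fastmap superblock can only live in the first UBI_FM_MAX_START
+	 * PEBs; scan them and remember the anchor candidate with the
+	 * highest sequence number.
+	 */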
+	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+		int vol_id = -1;
+		unsigned long long sqnum = -1;
+		cond_resched();
+
+		dbg_gen("process PEB %d", pnum);
+		err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+		if (err < 0)
+			goto out_vidh;
+
+		if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+			max_sqnum = sqnum;
+			fm_anchor = pnum;
+		}
+	}
+
+	ubi_free_vid_hdr(ubi, vidh);
+	kfree(ech);
+
+	if (fm_anchor < 0)
+		return UBI_NO_FASTMAP;
+
+	return ubi_scan_fastmap(ubi, ai, fm_anchor);
+
+out_vidh:
+	ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+	kfree(ech);
+out:
+	return err;
+}
+
+#endif
+
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
+{
+	struct ubi_attach_info *ai;
+
+	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+	if (!ai)
+		return ai;
+
+	INIT_LIST_HEAD(&ai->corr);
+	INIT_LIST_HEAD(&ai->free);
+	INIT_LIST_HEAD(&ai->erase);
+	INIT_LIST_HEAD(&ai->alien);
+	ai->volumes = RB_ROOT;
+	ai->aeb_slab_cache = kmem_cache_create(slab_name,
+					       sizeof(struct ubi_ainf_peb),
+					       0, 0, NULL);
+	if (!ai->aeb_slab_cache) {
+		kfree(ai);
+		ai = NULL;
+	}
+
+	return ai;
+}
+
+/**
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi, int force_scan)
+{
+	int err;
+	struct ubi_attach_info *ai;
+
+	ai = alloc_ai("ubi_aeb_slab_cache");
+	if (!ai)
+		return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/* On small flash devices we disable fastmap in any case. */
+	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+		ubi->fm_disabled = 1;
+		force_scan = 1;
+	}
+
+	if (force_scan)
+		err = scan_all(ubi, ai, 0);
+	else {
+		err = scan_fast(ubi, ai);
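+		/*
+		 * A positive return code means there was either no fastmap
+		 * (UBI_NO_FASTMAP) or a bad one (UBI_BAD_FASTMAP). A bad
+		 * fastmap invalidates the partially built @ai, so rebuild it
+		 * from scratch; a missing fastmap just means the remaining
+		 * PEBs past UBI_FM_MAX_START still have to be scanned.
+		 */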
+		if (err > 0) {
+			if (err != UBI_NO_FASTMAP) {
+				destroy_ai(ai);
+				ai = alloc_ai("ubi_aeb_slab_cache2");
+				if (!ai)
+					return -ENOMEM;
+
+				err = scan_all(ubi, ai, 0);
+			} else {
+				err = scan_all(ubi, ai, UBI_FM_MAX_START);
+			}
+		}
+	}
+#else
+	err = scan_all(ubi, ai, 0);
+#endif
+	if (err)
+		goto out_ai;
+
+	ubi->bad_peb_count = ai->bad_peb_count;
+	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+	ubi->corr_peb_count = ai->corr_peb_count;
+	ubi->max_ec = ai->max_ec;
+	ubi->mean_ec = ai->mean_ec;
+	dbg_gen("max. sequence number:       %llu", ai->max_sqnum);
+
+	err = ubi_read_volume_table(ubi, ai);
+	if (err)
+		goto out_ai;
+
+	err = ubi_wl_init(ubi, ai);
+	if (err)
+		goto out_vtbl;
+
+	err = ubi_eba_init(ubi, ai);
+	if (err)
+		goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+		struct ubi_attach_info *scan_ai;
+
+		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+		if (!scan_ai) {
+			err = -ENOMEM;
+			goto out_wl;
+		}
+
+		err = scan_all(ubi, scan_ai, 0);
+		if (err) {
+			destroy_ai(scan_ai);
+			goto out_wl;
+		}
+
+		err = self_check_eba(ubi, ai, scan_ai);
+		destroy_ai(scan_ai);
+
+		if (err)
+			goto out_wl;
+	}
+#endif
+
+	destroy_ai(ai);
+	return 0;
+
+out_wl:
+	ubi_wl_close(ubi);
+out_vtbl:
+	ubi_free_internal_volumes(ubi);
+	vfree(ubi->vtbl);
+out_ai:
+	destroy_ai(ai);
+	return err;
+}
+
+/**
+ * self_check_ai - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero if the attaching information is all right, and a
+ * negative error code if not or if an error occurred.
+ */
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+	int pnum, err, vols_found = 0;
+	struct rb_node *rb1, *rb2;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb, *last_aeb;
+	uint8_t *buf;
+
+	if (!ubi_dbg_chk_gen(ubi))
+		return 0;
+
+	/*
+	 * At first, check that attaching information is OK.
+	 */
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+		int leb_count = 0;
+
+		cond_resched();
+
+		vols_found += 1;
+
+		if (ai->is_empty) {
+			ubi_err("bad is_empty flag");
+			goto bad_av;
+		}
+
+		if (av->vol_id < 0 || av->highest_lnum < 0 ||
+		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+		    av->data_pad < 0 || av->last_data_size < 0) {
+			ubi_err("negative values");
+			goto bad_av;
+		}
+
+		if (av->vol_id >= UBI_MAX_VOLUMES &&
+		    av->vol_id < UBI_INTERNAL_VOL_START) {
+			ubi_err("bad vol_id");
+			goto bad_av;
+		}
+
+		if (av->vol_id > ai->highest_vol_id) {
+			ubi_err("highest_vol_id is %d, but vol_id %d is there",
+				ai->highest_vol_id, av->vol_id);
+			goto out;
+		}
+
+		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+		    av->vol_type != UBI_STATIC_VOLUME) {
+			ubi_err("bad vol_type");
+			goto bad_av;
+		}
+
+		if (av->data_pad > ubi->leb_size / 2) {
+			ubi_err("bad data_pad");
+			goto bad_av;
+		}
+
+		last_aeb = NULL;
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+			cond_resched();
+
+			last_aeb = aeb;
+			leb_count += 1;
+
+			if (aeb->pnum < 0 || aeb->ec < 0) {
+				ubi_err("negative values");
+				goto bad_aeb;
+			}
+
+			if (aeb->ec < ai->min_ec) {
+				ubi_err("bad ai->min_ec (%d), %d found",
+					ai->min_ec, aeb->ec);
+				goto bad_aeb;
+			}
+
+			if (aeb->ec > ai->max_ec) {
+				ubi_err("bad ai->max_ec (%d), %d found",
+					ai->max_ec, aeb->ec);
+				goto bad_aeb;
+			}
+
+			if (aeb->pnum >= ubi->peb_count) {
+				ubi_err("too high PEB number %d, total PEBs %d",
+					aeb->pnum, ubi->peb_count);
+				goto bad_aeb;
+			}
+
+			if (av->vol_type == UBI_STATIC_VOLUME) {
+				if (aeb->lnum >= av->used_ebs) {
+					ubi_err("bad lnum or used_ebs");
+					goto bad_aeb;
+				}
+			} else {
+				if (av->used_ebs != 0) {
+					ubi_err("non-zero used_ebs");
+					goto bad_aeb;
+				}
+			}
+
+			if (aeb->lnum > av->highest_lnum) {
+				ubi_err("incorrect highest_lnum or lnum");
+				goto bad_aeb;
+			}
+		}
+
+		if (av->leb_count != leb_count) {
+			ubi_err("bad leb_count, %d objects in the tree",
+				leb_count);
+			goto bad_av;
+		}
+
+		if (!last_aeb)
+			continue;
+
+		aeb = last_aeb;
+
+		if (aeb->lnum != av->highest_lnum) {
+			ubi_err("bad highest_lnum");
+			goto bad_aeb;
+		}
+	}
+
+	if (vols_found != ai->vols_found) {
+		ubi_err("bad ai->vols_found %d, should be %d",
+			ai->vols_found, vols_found);
+		goto out;
+	}
+
+	/* Check that attaching information is correct */
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+		last_aeb = NULL;
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+			int vol_type;
+
+			cond_resched();
+
+			last_aeb = aeb;
+
+			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
+			if (err && err != UBI_IO_BITFLIPS) {
+				ubi_err("VID header is not OK (%d)", err);
+				if (err > 0)
+					err = -EIO;
+				return err;
+			}
+
+			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
+				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+			if (av->vol_type != vol_type) {
+				ubi_err("bad vol_type");
+				goto bad_vid_hdr;
+			}
+
+			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+				ubi_err("bad sqnum %llu", aeb->sqnum);
+				goto bad_vid_hdr;
+			}
+
+			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+				ubi_err("bad vol_id %d", av->vol_id);
+				goto bad_vid_hdr;
+			}
+
+			if (av->compat != vidh->compat) {
+				ubi_err("bad compat %d", vidh->compat);
+				goto bad_vid_hdr;
+			}
+
+			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+				ubi_err("bad lnum %d", aeb->lnum);
+				goto bad_vid_hdr;
+			}
+
+			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+				ubi_err("bad used_ebs %d", av->used_ebs);
+				goto bad_vid_hdr;
+			}
+
+			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+				ubi_err("bad data_pad %d", av->data_pad);
+				goto bad_vid_hdr;
+			}
+		}
+
+		if (!last_aeb)
+			continue;
+
+		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+			ubi_err("bad highest_lnum %d", av->highest_lnum);
+			goto bad_vid_hdr;
+		}
+
+		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+			ubi_err("bad last_data_size %d", av->last_data_size);
+			goto bad_vid_hdr;
+		}
+	}
+
+	/*
+	 * Make sure that all the physical eraseblocks are in one of the lists
+	 * or trees.
+	 */
+	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+		err = ubi_io_is_bad(ubi, pnum);
+		if (err < 0) {
+			kfree(buf);
+			return err;
+		} else if (err)
+			buf[pnum] = 1;
+	}
+
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+			buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->free, u.list)
+		buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->corr, u.list)
+		buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->erase, u.list)
+		buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->alien, u.list)
+		buf[aeb->pnum] = 1;
+
+	err = 0;
+	for (pnum = 0; pnum < ubi->peb_count; pnum++)
+		if (!buf[pnum]) {
+			ubi_err("PEB %d is not referenced", pnum);
+			err = 1;
+		}
+
+	kfree(buf);
+	if (err)
+		goto out;
+	return 0;
+
+bad_aeb:
+	ubi_err("bad attaching information about LEB %d", aeb->lnum);
+	ubi_dump_aeb(aeb, 0);
+	ubi_dump_av(av);
+	goto out;
+
+bad_av:
+	ubi_err("bad attaching information about volume %d", av->vol_id);
+	ubi_dump_av(av);
+	goto out;
+
+bad_vid_hdr:
+	ubi_err("bad attaching information about volume %d", av->vol_id);
+	ubi_dump_av(av);
+	ubi_dump_vid_hdr(vidh);
+
+out:
+	dump_stack();
+	return -EINVAL;
+}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6d86c0b..7094b9c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -15,56 +15,88 @@
  * module load parameters or the kernel boot parameters. If MTD devices were
  * not specified, UBI does not attach any MTD device, but it is possible to do
  * later using the "UBI control device".
- *
- * At the moment we only attach UBI devices by scanning, which will become a
- * bottleneck when flashes reach certain large size. Then one may improve UBI
- * and add other methods, although it does not seem to be easy to do.
  */
 
-#ifdef UBI_LINUX
-#include <linux/err.h>
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/stringify.h>
+#include <linux/namei.h>
 #include <linux/stat.h>
 #include <linux/miscdevice.h>
 #include <linux/log2.h>
 #include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#else
+#include <linux/compat.h>
 #endif
+#include <linux/err.h>
 #include <ubi_uboot.h>
+#include <linux/mtd/partitions.h>
+
 #include "ubi.h"
 
+/* Maximum length of the 'mtd=' parameter */
+#define MTD_PARAM_LEN_MAX 64
+
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 4
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
 #if (CONFIG_SYS_MALLOC_LEN < (512 << 10))
 #error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k
 #endif
 
-/* Maximum length of the 'mtd=' parameter */
-#define MTD_PARAM_LEN_MAX 64
-
 /**
  * struct mtd_dev_param - MTD device parameter description data structure.
- * @name: MTD device name or number string
+ * @name: MTD character device node path, MTD device name, or MTD device number
+ *        string
  * @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
  */
-struct mtd_dev_param
-{
+struct mtd_dev_param {
 	char name[MTD_PARAM_LEN_MAX];
+	int ubi_num;
 	int vid_hdr_offs;
+	int max_beb_per1024;
 };
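+
+/*
+ * Illustrative example (the device name is hypothetical): the parameter
+ * "mtd=nand0,2048,20,1" would be parsed by ubi_mtd_param_parse() below
+ * into name "nand0", vid_hdr_offs 2048, max_beb_per1024 20 and ubi_num 1.
+ */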
 
 /* Numbers of elements set in the @mtd_dev_param array */
-static int mtd_devs = 0;
+static int __initdata mtd_devs;
 
 /* MTD devices specification parameters */
-static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
-
+static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
+#ifndef __UBOOT__
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+#endif
+#else
+#ifdef CONFIG_MTD_UBI_FASTMAP
+#if !defined(CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT)
+#define CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT 0
+#endif
+static bool fm_autoconvert = CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT;
+#endif
+#endif
 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
 struct class *ubi_class;
 
-#ifdef UBI_LINUX
 /* Slab cache for wear-leveling entries */
 struct kmem_cache *ubi_wl_entry_slab;
 
+#ifndef __UBOOT__
 /* UBI control character device */
 static struct miscdevice ubi_ctrl_cdev = {
 	.minor = MISC_DYNAMIC_MINOR,
@@ -74,9 +106,13 @@
 #endif
 
 /* All UBI devices in system */
+#ifndef __UBOOT__
+static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+#else
 struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+#endif
 
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 /* Serializes UBI devices creations and removals */
 DEFINE_MUTEX(ubi_devices_mutex);
 
@@ -84,7 +120,8 @@
 static DEFINE_SPINLOCK(ubi_devices_lock);
 
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
-static ssize_t ubi_version_show(struct class *class, char *buf)
+static ssize_t ubi_version_show(struct class *class,
+				struct class_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", UBI_VERSION);
 }
@@ -122,6 +159,112 @@
 #endif
 
 /**
+ * ubi_volume_notify - send a volume change notification.
+ * @ubi: UBI device description object
+ * @vol: volume description object of the changed volume
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ *
+ * This is a helper function which notifies all subscribers about a volume
+ * change event (creation, removal, re-sizing, re-naming, updating). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
+{
+	struct ubi_notification nt;
+
+	ubi_do_get_device_info(ubi, &nt.di);
+	ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
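+	/*
+	 * Volume layout changes make the on-flash fastmap stale, so a
+	 * fresh one is written before the subscribers are notified.
+	 */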
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	switch (ntype) {
+	case UBI_VOLUME_ADDED:
+	case UBI_VOLUME_REMOVED:
+	case UBI_VOLUME_RESIZED:
+	case UBI_VOLUME_RENAMED:
+		if (ubi_update_fastmap(ubi)) {
+			ubi_err("Unable to update fastmap!");
+			ubi_ro_mode(ubi);
+		}
+	}
+#endif
+	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
+}
+
+/**
+ * ubi_notify_all - send a notification to all volumes.
+ * @ubi: UBI device description object
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ * @nb: the notifier to call
+ *
+ * This function walks all volumes of UBI device @ubi and sends the @ntype
+ * notification for each volume. If @nb is %NULL, then all registered notifiers
+ * are called, otherwise only the @nb notifier is called. Returns the number of
+ * sent notifications.
+ */
+int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
+{
+	struct ubi_notification nt;
+	int i, count = 0;
+#ifndef __UBOOT__
+	int ret;
+#endif
+
+	ubi_do_get_device_info(ubi, &nt.di);
+
+	mutex_lock(&ubi->device_mutex);
+	for (i = 0; i < ubi->vtbl_slots; i++) {
+		/*
+		 * Since the @ubi->device is locked, and we are not going to
+		 * change @ubi->volumes, we do not have to lock
+		 * @ubi->volumes_lock.
+		 */
+		if (!ubi->volumes[i])
+			continue;
+
+		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
+#ifndef __UBOOT__
+		if (nb)
+			nb->notifier_call(nb, ntype, &nt);
+		else
+			ret = blocking_notifier_call_chain(&ubi_notifiers, ntype,
+						     &nt);
+#endif
+		count += 1;
+	}
+	mutex_unlock(&ubi->device_mutex);
+
+	return count;
+}
+
+/**
+ * ubi_enumerate_volumes - send "add" notification for all existing volumes.
+ * @nb: the notifier to call
+ *
+ * This function walks all UBI devices and volumes and sends the
+ * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
+ * registered notifiers are called, otherwise only the @nb notifier is called.
+ * Returns the number of sent notifications.
+ */
+int ubi_enumerate_volumes(struct notifier_block *nb)
+{
+	int i, count = 0;
+
+	/*
+	 * Since the @ubi_devices_mutex is locked, and we are not going to
+	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
+	 */
+	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+		struct ubi_device *ubi = ubi_devices[i];
+
+		if (!ubi)
+			continue;
+		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
+	}
+
+	return count;
+}
+
+/**
  * ubi_get_device - get UBI device.
  * @ubi_num: UBI device number
  *
@@ -159,8 +302,7 @@
 }
 
 /**
- * ubi_get_by_major - get UBI device description object by character device
- *                    major number.
+ * ubi_get_by_major - get UBI device by character device major number.
  * @major: major number
  *
  * This function is similar to 'ubi_get_device()', but it searches the device
@@ -213,7 +355,7 @@
 	return ubi_num;
 }
 
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
 static ssize_t dev_attribute_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
@@ -265,28 +407,35 @@
 	return ret;
 }
 
-/* Fake "release" method for UBI devices */
-static void dev_release(struct device *dev) { }
+static void dev_release(struct device *dev)
+{
+	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
+
+	kfree(ubi);
+}
 
 /**
  * ubi_sysfs_init - initialize sysfs for an UBI device.
  * @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ *       taken
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int ubi_sysfs_init(struct ubi_device *ubi)
+static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
 {
 	int err;
 
 	ubi->dev.release = dev_release;
 	ubi->dev.devt = ubi->cdev.dev;
 	ubi->dev.class = ubi_class;
-	sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
+	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
 	err = device_register(&ubi->dev);
 	if (err)
 		return err;
 
+	*ref = 1;
 	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
 	if (err)
 		return err;
@@ -343,7 +492,7 @@
 #endif
 
 /**
- * kill_volumes - destroy all volumes.
+ * kill_volumes - destroy all user volumes.
  * @ubi: UBI device description object
  */
 static void kill_volumes(struct ubi_device *ubi)
@@ -358,17 +507,29 @@
 /**
  * uif_init - initialize user interfaces for an UBI device.
  * @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ *       taken, otherwise set to %0
+ *
+ * This function initializes various user interfaces for an UBI device. If the
+ * initialization fails at an early stage, this function frees all the
+ * resources it allocated, returns an error, and @ref is set to %0. However,
+ * if the initialization fails after the UBI device was registered in the
+ * driver core subsystem, this function takes a reference to @ubi->dev, because
+ * otherwise the release function ('dev_release()') would free whole @ubi
+ * object. The @ref argument is set to %1 in this case. The caller has to put
+ * this reference.
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int uif_init(struct ubi_device *ubi)
+static int uif_init(struct ubi_device *ubi, int *ref)
 {
 	int i, err;
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 	dev_t dev;
 #endif
 
+	*ref = 0;
 	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
 
 	/*
@@ -387,7 +548,7 @@
 
 	ubi_assert(MINOR(dev) == 0);
 	cdev_init(&ubi->cdev, &ubi_cdev_operations);
-	dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
+	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
 	ubi->cdev.owner = THIS_MODULE;
 
 	err = cdev_add(&ubi->cdev, dev, 1);
@@ -396,7 +557,7 @@
 		goto out_unreg;
 	}
 
-	err = ubi_sysfs_init(ubi);
+	err = ubi_sysfs_init(ubi, ref);
 	if (err)
 		goto out_sysfs;
 
@@ -414,6 +575,8 @@
 out_volumes:
 	kill_volumes(ubi);
 out_sysfs:
+	if (*ref)
+		get_device(&ubi->dev);
 	ubi_sysfs_close(ubi);
 	cdev_del(&ubi->cdev);
 out_unreg:
@@ -425,6 +588,10 @@
 /**
  * uif_close - close user interfaces for an UBI device.
  * @ubi: UBI device description object
+ *
+ * Note, since this function un-registers UBI volume device objects (@vol->dev),
+ * the memory allocated for the volumes is freed as well (in the release
+ * function).
  */
 static void uif_close(struct ubi_device *ubi)
 {
@@ -435,58 +602,52 @@
 }
 
 /**
- * attach_by_scanning - attach an MTD device using scanning method.
- * @ubi: UBI device descriptor
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, currently this is the only method to attach UBI devices. Hopefully in
- * the future we'll have more scalable attaching methods and avoid full media
- * scanning. But even in this case scanning will be needed as a fall-back
- * attaching method if there are some on-flash table corruptions.
+ * ubi_free_internal_volumes - free internal volumes.
+ * @ubi: UBI device description object
  */
-static int attach_by_scanning(struct ubi_device *ubi)
+void ubi_free_internal_volumes(struct ubi_device *ubi)
 {
-	int err;
-	struct ubi_scan_info *si;
+	int i;
 
-	si = ubi_scan(ubi);
-	if (IS_ERR(si))
-		return PTR_ERR(si);
+	for (i = ubi->vtbl_slots;
+	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+		kfree(ubi->volumes[i]->eba_tbl);
+		kfree(ubi->volumes[i]);
+	}
+}
 
-	ubi->bad_peb_count = si->bad_peb_count;
-	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
-	ubi->max_ec = si->max_ec;
-	ubi->mean_ec = si->mean_ec;
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
+{
+	int limit, device_pebs;
+	uint64_t device_size;
 
-	err = ubi_read_volume_table(ubi, si);
-	if (err)
-		goto out_si;
+	if (!max_beb_per1024)
+		return 0;
 
-	err = ubi_eba_init_scan(ubi, si);
-	if (err)
-		goto out_vtbl;
+	/*
+	 * Here we are using size of the entire flash chip and
+	 * not just the MTD partition size because the maximum
+	 * number of bad eraseblocks is a percentage of the
+	 * whole device and bad eraseblocks are not fairly
+	 * distributed over the flash chip. So the worst case
+	 * is that all the bad eraseblocks of the chip are in
+	 * the MTD partition we are attaching (ubi->mtd).
+	 */
+	device_size = mtd_get_device_size(ubi->mtd);
+	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+	limit = mult_frac(device_pebs, max_beb_per1024, 1024);
 
-	err = ubi_wl_init_scan(ubi, si);
-	if (err)
-		goto out_eba;
+	/* Round it up */
+	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+		limit += 1;
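+	/*
+	 * Worked example (illustrative numbers): a chip with 4096 PEBs
+	 * and max_beb_per1024 = 20 yields limit = 4096 * 20 / 1024 = 80
+	 * PEBs reserved for bad block handling.
+	 */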
 
-	ubi_scan_destroy_si(si);
-	return 0;
-
-out_eba:
-	ubi_eba_close(ubi);
-out_vtbl:
-	vfree(ubi->vtbl);
-out_si:
-	ubi_scan_destroy_si(si);
-	return err;
+	return limit;
 }
 
 /**
- * io_init - initialize I/O unit for a given UBI device.
+ * io_init - initialize I/O sub-system for a given UBI device.
  * @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  *
  * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
  * assumed:
@@ -499,8 +660,11 @@
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int io_init(struct ubi_device *ubi)
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
 {
+	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
 	if (ubi->mtd->numeraseregions != 0) {
 		/*
 		 * Some flashes have several erase regions. Different regions
@@ -527,8 +691,15 @@
 	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
 	ubi->flash_size = ubi->mtd->size;
 
-	if (mtd_can_have_bb(ubi->mtd))
+	if (mtd_can_have_bb(ubi->mtd)) {
 		ubi->bad_allowed = 1;
+		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+	}
+
+	if (ubi->mtd->type == MTD_NORFLASH) {
+		ubi_assert(ubi->mtd->writesize == 1);
+		ubi->nor_flash = 1;
+	}
 
 	ubi->min_io_size = ubi->mtd->writesize;
 	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
@@ -548,14 +719,28 @@
 	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
 	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
 
+	ubi->max_write_size = ubi->mtd->writebufsize;
+	/*
+	 * The maximum write size has to be greater than or equal to the
+	 * min. I/O size, and it must be a multiple of the min. I/O size.
+	 */
+	if (ubi->max_write_size < ubi->min_io_size ||
+	    ubi->max_write_size % ubi->min_io_size ||
+	    !is_power_of_2(ubi->max_write_size)) {
+		ubi_err("bad write buffer size %d for %d min. I/O unit",
+			ubi->max_write_size, ubi->min_io_size);
+		return -EINVAL;
+	}
+
 	/* Calculate default aligned sizes of EC and VID headers */
 	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
 	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
 
-	dbg_msg("min_io_size      %d", ubi->min_io_size);
-	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
-	dbg_msg("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
-	dbg_msg("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
+	dbg_gen("min_io_size      %d", ubi->min_io_size);
+	dbg_gen("max_write_size   %d", ubi->max_write_size);
+	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
+	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
 
 	if (ubi->vid_hdr_offset == 0)
 		/* Default offset */
@@ -569,13 +754,13 @@
 	}
 
 	/* Similar for the data offset */
-	ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
 	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
 
-	dbg_msg("vid_hdr_offset   %d", ubi->vid_hdr_offset);
-	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
-	dbg_msg("vid_hdr_shift    %d", ubi->vid_hdr_shift);
-	dbg_msg("leb_start        %d", ubi->leb_start);
+	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
+	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
+	dbg_gen("leb_start        %d", ubi->leb_start);
 
 	/* The shift must be aligned to 32-bit boundary */
 	if (ubi->vid_hdr_shift % 4) {
@@ -595,41 +780,38 @@
 	}
 
 	/*
+	 * Set the maximum number of erroneous physical eraseblocks to 10%.
+	 * Erroneous PEBs are those which have read errors.
+	 */
+	ubi->max_erroneous = ubi->peb_count / 10;
+	if (ubi->max_erroneous < 16)
+		ubi->max_erroneous = 16;
+	dbg_gen("max_erroneous    %d", ubi->max_erroneous);
+
+	/*
 	 * It may happen that EC and VID headers are situated in one minimal
 	 * I/O unit. In this case we can only accept this UBI image in
 	 * read-only mode.
 	 */
 	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
-		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
-			 "switch to read-only mode");
+		ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
 		ubi->ro_mode = 1;
 	}
 
 	ubi->leb_size = ubi->peb_size - ubi->leb_start;
 
 	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
-		ubi_msg("MTD device %d is write-protected, attach in "
-			"read-only mode", ubi->mtd->index);
+		ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+			ubi->mtd->index);
 		ubi->ro_mode = 1;
 	}
 
-	ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
-		ubi->peb_size, ubi->peb_size >> 10);
-	ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
-	ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
-	if (ubi->hdrs_min_io_size != ubi->min_io_size)
-		ubi_msg("sub-page size:              %d",
-			ubi->hdrs_min_io_size);
-	ubi_msg("VID header offset:          %d (aligned %d)",
-		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
-	ubi_msg("data offset:                %d", ubi->leb_start);
-
 	/*
-	 * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
 	 * unfortunately, MTD does not provide this information. We should loop
 	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
-	 * each physical eraseblock. So, we skip ubi->bad_peb_count
-	 * uninitialized and initialize it after scanning.
+	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
+	 * uninitialized so far.
 	 */
 
 	return 0;
@@ -640,7 +822,7 @@
  * @ubi: UBI device description object
  * @vol_id: ID of the volume to re-size
  *
- * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
  * the volume table to the largest possible size. See comments in ubi-header.h
  * for more description of the flag. Returns zero in case of success and a
  * negative error code in case of failure.
@@ -651,9 +833,14 @@
 	struct ubi_volume *vol = ubi->volumes[vol_id];
 	int err, old_reserved_pebs = vol->reserved_pebs;
 
+	if (ubi->ro_mode) {
+		ubi_warn("skip auto-resize because of R/O mode");
+		return 0;
+	}
+
 	/*
 	 * Clear the auto-resize flag in the volume in-memory copy of the
-	 * volume table, and 'ubi_resize_volume()' will propogate this change
+	 * volume table, and 'ubi_resize_volume()' will propagate this change
 	 * to the flash.
 	 */
 	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
@@ -662,11 +849,10 @@
 		struct ubi_vtbl_record vtbl_rec;
 
 		/*
-		 * No avalilable PEBs to re-size the volume, clear the flag on
+		 * No available PEBs to re-size the volume, clear the flag on
 		 * flash and exit.
 		 */
-		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
-		       sizeof(struct ubi_vtbl_record));
+		vtbl_rec = ubi->vtbl[vol_id];
 		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
 		if (err)
 			ubi_err("cannot clean auto-resize flag for volume %d",
@@ -689,23 +875,31 @@
 
 /**
  * ubi_attach_mtd_dev - attach an MTD device.
- * @mtd_dev: MTD device description object
+ * @mtd: MTD device description object
  * @ubi_num: number to assign to the new UBI device
  * @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  *
  * This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num number
  * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
- * which case this function finds a vacant device nubert and assings it
+ * which case this function finds a vacant device number and assigns it
  * automatically. Returns the new UBI device number in case of success and a
  * negative error code in case of failure.
  *
  * Note, the invocations of this function have to be serialized by the
  * @ubi_devices_mutex.
  */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+		       int vid_hdr_offset, int max_beb_per1024)
 {
 	struct ubi_device *ubi;
-	int i, err;
+	int i, err, ref = 0;
+
+	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+		return -EINVAL;
+
+	if (!max_beb_per1024)
+		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
 
 	/*
 	 * Check if we already have the same MTD device attached.
@@ -716,7 +910,7 @@
 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
 		ubi = ubi_devices[i];
 		if (ubi && mtd->index == ubi->mtd->index) {
-			dbg_err("mtd%d is already attached to ubi%d",
+			ubi_err("mtd%d is already attached to ubi%d",
 				mtd->index, i);
 			return -EEXIST;
 		}
@@ -731,8 +925,8 @@
 	 * no sense to attach emulated MTD devices, so we prohibit this.
 	 */
 	if (mtd->type == MTD_UBIVOLUME) {
-		ubi_err("refuse attaching mtd%d - it is already emulated on "
-			"top of UBI", mtd->index);
+		ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+			mtd->index);
 		return -EINVAL;
 	}
 
@@ -742,7 +936,8 @@
 			if (!ubi_devices[ubi_num])
 				break;
 		if (ubi_num == UBI_MAX_DEVICES) {
-			dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
+			ubi_err("only %d UBI devices may be created",
+				UBI_MAX_DEVICES);
 			return -ENFILE;
 		}
 	} else {
@@ -751,7 +946,7 @@
 
 		/* Make sure ubi_num is not busy */
 		if (ubi_devices[ubi_num]) {
-			dbg_err("ubi%d already exists", ubi_num);
+			ubi_err("ubi%d already exists", ubi_num);
 			return -EEXIST;
 		}
 	}
@@ -765,36 +960,61 @@
 	ubi->vid_hdr_offset = vid_hdr_offset;
 	ubi->autoresize_vol_id = -1;
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	ubi->fm_pool.used = ubi->fm_pool.size = 0;
+	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+	/*
+	 * fm_pool.max_size is 5% of the total number of PEBs, clamped
+	 * between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
+	 */
+	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+
+	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+	ubi->fm_disabled = !fm_autoconvert;
+
+	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+	    <= UBI_FM_MAX_START) {
+		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+			UBI_FM_MAX_START);
+		ubi->fm_disabled = 1;
+	}
+
+	ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
+	ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+#else
+	ubi->fm_disabled = 1;
+#endif
 	mutex_init(&ubi->buf_mutex);
 	mutex_init(&ubi->ckvol_mutex);
-	mutex_init(&ubi->volumes_mutex);
+	mutex_init(&ubi->device_mutex);
 	spin_lock_init(&ubi->volumes_lock);
+	mutex_init(&ubi->fm_mutex);
+	init_rwsem(&ubi->fm_sem);
 
 	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
 
-	err = io_init(ubi);
+	err = io_init(ubi, max_beb_per1024);
 	if (err)
 		goto out_free;
 
 	err = -ENOMEM;
-	ubi->peb_buf1 = vmalloc(ubi->peb_size);
-	if (!ubi->peb_buf1)
+	ubi->peb_buf = vmalloc(ubi->peb_size);
+	if (!ubi->peb_buf)
 		goto out_free;
 
-	ubi->peb_buf2 = vmalloc(ubi->peb_size);
-	if (!ubi->peb_buf2)
-		goto out_free;
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-	mutex_init(&ubi->dbg_buf_mutex);
-	ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
-	if (!ubi->dbg_peb_buf)
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	ubi->fm_size = ubi_calc_fm_size(ubi);
+	ubi->fm_buf = vzalloc(ubi->fm_size);
+	if (!ubi->fm_buf)
 		goto out_free;
 #endif
-
-	err = attach_by_scanning(ubi);
+	err = ubi_attach(ubi, 0);
 	if (err) {
-		dbg_err("failed to attach by scanning, error %d", err);
+		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
 		goto out_free;
 	}
 
@@ -804,56 +1024,71 @@
 			goto out_detach;
 	}
 
-	err = uif_init(ubi);
+	err = uif_init(ubi, &ref);
 	if (err)
 		goto out_detach;
 
-	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
+	err = ubi_debugfs_init_dev(ubi);
+	if (err)
+		goto out_uif;
+
+	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
 	if (IS_ERR(ubi->bgt_thread)) {
 		err = PTR_ERR(ubi->bgt_thread);
 		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
 			err);
-		goto out_uif;
+		goto out_debugfs;
 	}
 
-	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
-	ubi_msg("MTD device name:            \"%s\"", mtd->name);
-	ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
-	ubi_msg("number of good PEBs:        %d", ubi->good_peb_count);
-	ubi_msg("number of bad PEBs:         %d", ubi->bad_peb_count);
-	ubi_msg("max. allowed volumes:       %d", ubi->vtbl_slots);
-	ubi_msg("wear-leveling threshold:    %d", CONFIG_MTD_UBI_WL_THRESHOLD);
-	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
-	ubi_msg("number of user volumes:     %d",
-		ubi->vol_count - UBI_INT_VOL_COUNT);
-	ubi_msg("available PEBs:             %d", ubi->avail_pebs);
-	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
-	ubi_msg("number of PEBs reserved for bad PEB handling: %d",
-		ubi->beb_rsvd_pebs);
-	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
+	ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+		mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+	ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+	ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+	ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+	ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+	ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+		ubi->vtbl_slots);
+	ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+		ubi->image_seq);
+	ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
 
-	/* Enable the background thread */
-	if (!DBG_DISABLE_BGT) {
-		ubi->thread_enabled = 1;
-		wake_up_process(ubi->bgt_thread);
-	}
+	/*
+	 * The below lock makes sure we do not race with 'ubi_thread()' which
+	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
+	 */
+	spin_lock(&ubi->wl_lock);
+	ubi->thread_enabled = 1;
+	wake_up_process(ubi->bgt_thread);
+	spin_unlock(&ubi->wl_lock);
 
 	ubi_devices[ubi_num] = ubi;
+	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
 	return ubi_num;
 
+out_debugfs:
+	ubi_debugfs_exit_dev(ubi);
 out_uif:
+	get_device(&ubi->dev);
+	ubi_assert(ref);
 	uif_close(ubi);
 out_detach:
-	ubi_eba_close(ubi);
 	ubi_wl_close(ubi);
+	ubi_free_internal_volumes(ubi);
 	vfree(ubi->vtbl);
 out_free:
-	vfree(ubi->peb_buf1);
-	vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
-	vfree(ubi->dbg_peb_buf);
-#endif
-	kfree(ubi);
+	vfree(ubi->peb_buf);
+	vfree(ubi->fm_buf);
+	if (ref)
+		put_device(&ubi->dev);
+	else
+		kfree(ubi);
 	return err;
 }
 
@@ -877,13 +1112,13 @@
 	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
 		return -EINVAL;
 
-	spin_lock(&ubi_devices_lock);
-	ubi = ubi_devices[ubi_num];
-	if (!ubi) {
-		spin_unlock(&ubi_devices_lock);
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
 		return -EINVAL;
-	}
 
+	spin_lock(&ubi_devices_lock);
+	put_device(&ubi->dev);
+	ubi->ref_count -= 1;
 	if (ubi->ref_count) {
 		if (!anyway) {
 			spin_unlock(&ubi_devices_lock);
@@ -897,8 +1132,13 @@
 	spin_unlock(&ubi_devices_lock);
 
 	ubi_assert(ubi_num == ubi->ubi_num);
-	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
-
+	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
+	ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/* If we don't write a new fastmap at detach time we lose all
+	 * EC updates that have been made since the last written fastmap. */
+	ubi_update_fastmap(ubi);
+#endif
 	/*
 	 * Before freeing anything, we have to stop the background thread to
 	 * prevent it from doing anything on this device while we are freeing.
@@ -906,29 +1146,73 @@
 	if (ubi->bgt_thread)
 		kthread_stop(ubi->bgt_thread);
 
+	/*
+	 * Get a reference to the device in order to prevent 'dev_release()'
+	 * from freeing the @ubi object.
+	 */
+	get_device(&ubi->dev);
+
+	ubi_debugfs_exit_dev(ubi);
 	uif_close(ubi);
-	ubi_eba_close(ubi);
+
 	ubi_wl_close(ubi);
+	ubi_free_internal_volumes(ubi);
 	vfree(ubi->vtbl);
 	put_mtd_device(ubi->mtd);
-	vfree(ubi->peb_buf1);
-	vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
-	vfree(ubi->dbg_peb_buf);
-#endif
+	vfree(ubi->peb_buf);
+	vfree(ubi->fm_buf);
 	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
-	kfree(ubi);
+	put_device(&ubi->dev);
 	return 0;
 }
 
+#ifndef __UBOOT__
 /**
- * find_mtd_device - open an MTD device by its name or number.
- * @mtd_dev: name or number of the device
+ * open_mtd_by_chdev - open an MTD device by its character device node path.
+ * @mtd_dev: MTD character device node path
+ *
+ * This helper function opens an MTD device by its character device node path.
+ * Returns MTD device description object in case of success and a negative
+ * error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
+{
+	int err, major, minor, mode;
+	struct path path;
+
+	/* Probably this is an MTD character device node path */
+	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+	if (err)
+		return ERR_PTR(err);
+
+	/* MTD device number is defined by the major / minor numbers */
+	major = imajor(path.dentry->d_inode);
+	minor = iminor(path.dentry->d_inode);
+	mode = path.dentry->d_inode->i_mode;
+	path_put(&path);
+	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
+		return ERR_PTR(-EINVAL);
+
+	if (minor & 1)
+		/*
+		 * Odd minors are the read-only "/dev/mtdrX" device nodes;
+		 * we do not think supporting them is needed, so skip them
+		 * to avoid doing extra work.
+		 */
+		return ERR_PTR(-EINVAL);
+
+	return get_mtd_device(NULL, minor / 2);
+}
+#endif
+
+/**
+ * open_mtd_device - open MTD device by name, character device path, or number.
+ * @mtd_dev: name, character device node path, or MTD device number
  *
  * This function tries to open an MTD device described by the @mtd_dev string,
- * which is first treated as an ASCII number, and if it is not true, it is
- * treated as MTD device name. Returns MTD device description object in case of
- * success and a negative error code in case of failure.
+ * which is first treated as an ASCII MTD device number; if that fails, it is
+ * treated as an MTD device name; and if that also fails, it is treated as an
+ * MTD character device node path. Returns the MTD device description object
+ * in case of success and a negative error code in case of failure.
  */
 static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
 {
@@ -943,13 +1227,22 @@
 		 * MTD device name.
 		 */
 		mtd = get_mtd_device_nm(mtd_dev);
+#ifndef __UBOOT__
+		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+			/* Probably this is an MTD character device node path */
+			mtd = open_mtd_by_chdev(mtd_dev);
+#endif
 	} else
 		mtd = get_mtd_device(NULL, mtd_num);
 
 	return mtd;
 }
 
-int __init ubi_init(void)
+#ifndef __UBOOT__
+static int __init ubi_init(void)
+#else
+int ubi_init(void)
+#endif
 {
 	int err, i, k;
 
@@ -982,13 +1275,18 @@
 		goto out_version;
 	}
 
-#ifdef UBI_LINUX
 	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
 					      sizeof(struct ubi_wl_entry),
 					      0, 0, NULL);
-	if (!ubi_wl_entry_slab)
+	if (!ubi_wl_entry_slab) {
+		err = -ENOMEM;
 		goto out_dev_unreg;
-#endif
+	}
+
+	err = ubi_debugfs_init();
+	if (err)
+		goto out_slab;
+
 
 	/* Attach MTD devices */
 	for (i = 0; i < mtd_devs; i++) {
@@ -1000,17 +1298,36 @@
 		mtd = open_mtd_device(p->name);
 		if (IS_ERR(mtd)) {
 			err = PTR_ERR(mtd);
-			goto out_detach;
+			ubi_err("cannot open mtd %s, error %d", p->name, err);
+			/* See the comment below regarding ubi_is_module(). */
+			if (ubi_is_module())
+				goto out_detach;
+			continue;
 		}
 
 		mutex_lock(&ubi_devices_mutex);
-		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
-					 p->vid_hdr_offs);
+		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
+					 p->vid_hdr_offs, p->max_beb_per1024);
 		mutex_unlock(&ubi_devices_mutex);
 		if (err < 0) {
-			put_mtd_device(mtd);
 			ubi_err("cannot attach mtd%d", mtd->index);
-			goto out_detach;
+			put_mtd_device(mtd);
+
+			/*
+			 * Originally UBI stopped initializing on any error.
+			 * However, later on it was found out that this
+			 * behavior is not very good when UBI is compiled into
+			 * the kernel and the MTD devices to attach are passed
+			 * through the command line. Indeed, UBI failure
+			 * stopped whole boot sequence.
+			 *
+			 * To fix this, we changed the behavior for the
+			 * non-module case, but preserved the old behavior for
+			 * the module case, just for compatibility. This is a
+			 * little inconsistent, though.
+			 */
+			if (ubi_is_module())
+				goto out_detach;
 		}
 	}
 
@@ -1023,23 +1340,26 @@
 			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
 			mutex_unlock(&ubi_devices_mutex);
 		}
-#ifdef UBI_LINUX
+	ubi_debugfs_exit();
+out_slab:
 	kmem_cache_destroy(ubi_wl_entry_slab);
 out_dev_unreg:
-#endif
 	misc_deregister(&ubi_ctrl_cdev);
 out_version:
 	class_remove_file(ubi_class, &ubi_version);
 out_class:
 	class_destroy(ubi_class);
 out:
-	mtd_devs = 0;
-	ubi_err("UBI error: cannot initialize UBI, error %d", err);
+	ubi_err("cannot initialize UBI, error %d", err);
 	return err;
 }
-module_init(ubi_init);
+late_initcall(ubi_init);
 
-void __exit ubi_exit(void)
+#ifndef __UBOOT__
+static void __exit ubi_exit(void)
+#else
+void ubi_exit(void)
+#endif
 {
 	int i;
 
@@ -1049,17 +1369,16 @@
 			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
 			mutex_unlock(&ubi_devices_mutex);
 		}
+	ubi_debugfs_exit();
 	kmem_cache_destroy(ubi_wl_entry_slab);
 	misc_deregister(&ubi_ctrl_cdev);
 	class_remove_file(ubi_class, &ubi_version);
 	class_destroy(ubi_class);
-	mtd_devs = 0;
 }
 module_exit(ubi_exit);
 
 /**
- * bytes_str_to_int - convert a string representing number of bytes to an
- * integer.
+ * bytes_str_to_int - convert a number-of-bytes string into an integer.
  * @str: the string to convert
  *
  * This function returns positive resulting integer in case of success and a
@@ -1071,9 +1390,8 @@
 	unsigned long result;
 
 	result = simple_strtoul(str, &endp, 0);
-	if (str == endp || result < 0) {
-		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
-		       str);
+	if (str == endp || result >= INT_MAX) {
+		ubi_err("incorrect bytes count: \"%s\"", str);
 		return -EINVAL;
 	}
 
@@ -1089,14 +1407,24 @@
 	case '\0':
 		break;
 	default:
-		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
-		       str);
+		ubi_err("incorrect bytes count: \"%s\"", str);
 		return -EINVAL;
 	}
 
 	return result;
 }
 
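+/**
+ * kstrtoint - minimal stand-in for the kernel's kstrtoint().
+ * @s: the string to parse
+ * @base: numeric base to parse the string in
+ * @res: where to store the parsed value
+ *
+ * Returns zero in case of success and %-ERANGE if the value does not fit
+ * into an int. Note that, unlike the kernel version, trailing garbage in
+ * @s is not rejected.
+ */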
+int kstrtoint(const char *s, unsigned int base, int *res)
+{
+	unsigned long long tmp;
+
+	tmp = simple_strtoull(s, NULL, base);
+	if (tmp != (unsigned long long)(int)tmp)
+		return -ERANGE;
+
+	*res = (int)tmp;
+	return 0;
+}
+
 /**
  * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
  * @val: the parameter value to parse
@@ -1105,33 +1433,36 @@
  * This function returns zero in case of success and a negative error code in
  * case of error.
  */
-int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+#ifndef __UBOOT__
+static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+#else
+int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+#endif
 {
 	int i, len;
 	struct mtd_dev_param *p;
 	char buf[MTD_PARAM_LEN_MAX];
 	char *pbuf = &buf[0];
-	char *tokens[2] = {NULL, NULL};
+	char *tokens[MTD_PARAM_MAX_COUNT], *token;
 
 	if (!val)
 		return -EINVAL;
 
 	if (mtd_devs == UBI_MAX_DEVICES) {
-		printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
-		       UBI_MAX_DEVICES);
+		ubi_err("too many parameters, max. is %d",
+			UBI_MAX_DEVICES);
 		return -EINVAL;
 	}
 
 	len = strnlen(val, MTD_PARAM_LEN_MAX);
 	if (len == MTD_PARAM_LEN_MAX) {
-		printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
-		       "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+		ubi_err("parameter \"%s\" is too long, max. is %d",
+			val, MTD_PARAM_LEN_MAX);
 		return -EINVAL;
 	}
 
 	if (len == 0) {
-		printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
-		       "ignored\n");
+		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
 		return 0;
 	}
 
@@ -1141,40 +1472,69 @@
 	if (buf[len - 1] == '\n')
 		buf[len - 1] = '\0';
 
-	for (i = 0; i < 2; i++)
+	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
 		tokens[i] = strsep(&pbuf, ",");
 
 	if (pbuf) {
-		printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
-		       val);
+		ubi_err("too many arguments at \"%s\"", val);
 		return -EINVAL;
 	}
 
 	p = &mtd_dev_param[mtd_devs];
 	strcpy(&p->name[0], tokens[0]);
 
-	if (tokens[1])
-		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
+	token = tokens[1];
+	if (token) {
+		p->vid_hdr_offs = bytes_str_to_int(token);
 
-	if (p->vid_hdr_offs < 0)
-		return p->vid_hdr_offs;
+		if (p->vid_hdr_offs < 0)
+			return p->vid_hdr_offs;
+	}
+
+	token = tokens[2];
+	if (token) {
+		int err = kstrtoint(token, 10, &p->max_beb_per1024);
+
+		if (err) {
+			ubi_err("bad value for max_beb_per1024 parameter: %s",
+				token);
+			return -EINVAL;
+		}
+	}
+
+	token = tokens[3];
+	if (token) {
+		int err = kstrtoint(token, 10, &p->ubi_num);
+
+		if (err) {
+			ubi_err("bad value for ubi_num parameter: %s", token);
+			return -EINVAL;
+		}
+	} else
+		p->ubi_num = UBI_DEV_NUM_AUTO;
 
 	mtd_devs += 1;
 	return 0;
 }
 
 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
-MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
-		      "mtd=<name|num>[,<vid_hdr_offs>].\n"
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
 		      "Multiple \"mtd\" parameters may be specified.\n"
-		      "MTD devices may be specified by their number or name.\n"
-		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
-		      "header position and data starting position to be used "
-		      "by UBI.\n"
-		      "Example: mtd=content,1984 mtd=4 - attach MTD device"
-		      "with name \"content\" using VID header offset 1984, and "
-		      "MTD device number 4 with default VID header offset.");
-
+		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+		      "Optional \"vid_hdr_offs\" parameter specifies the UBI VID header position to be used by UBI (the default value is used if 0).\n"
+		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks (default value ("
+		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0).\n"
+		      "Optional \"ubi_num\" parameter specifies the UBI device number which has to be assigned to the newly created UBI device (assigned automatically by default).\n"
+		      "\n"
+		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
+		      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).\n"
+		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI device 5, using default values for the other fields.");
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+#endif
 MODULE_VERSION(__stringify(UBI_VERSION));
 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
 MODULE_AUTHOR("Artem Bityutskiy");
diff --git a/drivers/mtd/ubi/crc32.c b/drivers/mtd/ubi/crc32.c
index f1bebf5..0d65bf4 100644
--- a/drivers/mtd/ubi/crc32.c
+++ b/drivers/mtd/ubi/crc32.c
@@ -20,7 +20,8 @@
  * Version 2.  See the file COPYING for more details.
  */
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/crc32.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -30,7 +31,7 @@
 
 #include <asm/byteorder.h>
 
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <asm/atomic.h>
@@ -46,7 +47,7 @@
 #define tobe(x) (x)
 #endif
 #include "crc32table.h"
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
 MODULE_DESCRIPTION("Ethernet CRC32 calculations");
 MODULE_LICENSE("GPL");
@@ -146,7 +147,7 @@
 # endif
 }
 #endif
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 /**
  * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
  * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for
@@ -379,7 +380,7 @@
 #include <stdlib.h>
 #include <stdio.h>
 
-#ifdef UBI_LINUX				/*Not used at present */
+#ifndef __UBOOT__
 static void
 buf_dump(char const *prefix, unsigned char const *buf, size_t len)
 {
@@ -405,7 +406,7 @@
 		*buf++ = (unsigned char) random();
 }
 
-#ifdef UBI_LINUX				/* Not used at present */
+#ifndef __UBOOT__
 static void store_le(u32 x, unsigned char *buf)
 {
 	buf[0] = (unsigned char) x;
diff --git a/drivers/mtd/ubi/crc32table.h b/drivers/mtd/ubi/crc32table.h
index 0438af4..02ce6fd 100644
--- a/drivers/mtd/ubi/crc32table.h
+++ b/drivers/mtd/ubi/crc32table.h
@@ -66,7 +66,7 @@
 tole(0xb3667a2eL), tole(0xc4614ab8L), tole(0x5d681b02L), tole(0x2a6f2b94L),
 tole(0xb40bbe37L), tole(0xc30c8ea1L), tole(0x5a05df1bL), tole(0x2d02ef8dL)
 };
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 static const u32 crc32table_be[] = {
 tobe(0x00000000L), tobe(0x04c11db7L), tobe(0x09823b6eL), tobe(0x0d4326d9L),
 tobe(0x130476dcL), tobe(0x17c56b6bL), tobe(0x1a864db2L), tobe(0x1e475005L),
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 6c22301..af254da 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -6,175 +6,455 @@
  * Author: Artem Bityutskiy (Битюцкий Артём)
  */
 
-/*
- * Here we keep all the UBI debugging stuff which should normally be disabled
- * and compiled-out, but it is extremely helpful when hunting bugs or doing big
- * changes.
- */
 #include <ubi_uboot.h>
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG
-
 #include "ubi.h"
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#endif
 
 /**
- * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+	int err;
+	size_t read;
+	void *buf;
+	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+	buf = vmalloc(len);
+	if (!buf)
+		return;
+	err = mtd_read(ubi->mtd, addr, len, &read, buf);
+	if (err && err != -EUCLEAN) {
+		ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+			err, len, pnum, offset, read);
+		goto out;
+	}
+
+	ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+		len, pnum, offset);
+	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+	vfree(buf);
+	return;
+}
+
+/**
+ * ubi_dump_ec_hdr - dump an erase counter header.
  * @ec_hdr: the erase counter header to dump
  */
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
 {
-	dbg_msg("erase counter header dump:");
-	dbg_msg("magic          %#08x", be32_to_cpu(ec_hdr->magic));
-	dbg_msg("version        %d",    (int)ec_hdr->version);
-	dbg_msg("ec             %llu",  (long long)be64_to_cpu(ec_hdr->ec));
-	dbg_msg("vid_hdr_offset %d",    be32_to_cpu(ec_hdr->vid_hdr_offset));
-	dbg_msg("data_offset    %d",    be32_to_cpu(ec_hdr->data_offset));
-	dbg_msg("hdr_crc        %#08x", be32_to_cpu(ec_hdr->hdr_crc));
-	dbg_msg("erase counter header hexdump:");
+	pr_err("Erase counter header dump:\n");
+	pr_err("\tmagic          %#08x\n", be32_to_cpu(ec_hdr->magic));
+	pr_err("\tversion        %d\n", (int)ec_hdr->version);
+	pr_err("\tec             %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+	pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+	pr_err("\tdata_offset    %d\n", be32_to_cpu(ec_hdr->data_offset));
+	pr_err("\timage_seq      %d\n", be32_to_cpu(ec_hdr->image_seq));
+	pr_err("\thdr_crc        %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+	pr_err("erase counter header hexdump:\n");
 	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
 		       ec_hdr, UBI_EC_HDR_SIZE, 1);
 }
 
 /**
- * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * ubi_dump_vid_hdr - dump a volume identifier header.
  * @vid_hdr: the volume identifier header to dump
  */
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
 {
-	dbg_msg("volume identifier header dump:");
-	dbg_msg("magic     %08x", be32_to_cpu(vid_hdr->magic));
-	dbg_msg("version   %d",   (int)vid_hdr->version);
-	dbg_msg("vol_type  %d",   (int)vid_hdr->vol_type);
-	dbg_msg("copy_flag %d",   (int)vid_hdr->copy_flag);
-	dbg_msg("compat    %d",   (int)vid_hdr->compat);
-	dbg_msg("vol_id    %d",   be32_to_cpu(vid_hdr->vol_id));
-	dbg_msg("lnum      %d",   be32_to_cpu(vid_hdr->lnum));
-	dbg_msg("leb_ver   %u",   be32_to_cpu(vid_hdr->leb_ver));
-	dbg_msg("data_size %d",   be32_to_cpu(vid_hdr->data_size));
-	dbg_msg("used_ebs  %d",   be32_to_cpu(vid_hdr->used_ebs));
-	dbg_msg("data_pad  %d",   be32_to_cpu(vid_hdr->data_pad));
-	dbg_msg("sqnum     %llu",
+	pr_err("Volume identifier header dump:\n");
+	pr_err("\tmagic     %08x\n", be32_to_cpu(vid_hdr->magic));
+	pr_err("\tversion   %d\n",  (int)vid_hdr->version);
+	pr_err("\tvol_type  %d\n",  (int)vid_hdr->vol_type);
+	pr_err("\tcopy_flag %d\n",  (int)vid_hdr->copy_flag);
+	pr_err("\tcompat    %d\n",  (int)vid_hdr->compat);
+	pr_err("\tvol_id    %d\n",  be32_to_cpu(vid_hdr->vol_id));
+	pr_err("\tlnum      %d\n",  be32_to_cpu(vid_hdr->lnum));
+	pr_err("\tdata_size %d\n",  be32_to_cpu(vid_hdr->data_size));
+	pr_err("\tused_ebs  %d\n",  be32_to_cpu(vid_hdr->used_ebs));
+	pr_err("\tdata_pad  %d\n",  be32_to_cpu(vid_hdr->data_pad));
+	pr_err("\tsqnum     %llu\n",
 		(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
-	dbg_msg("hdr_crc   %08x", be32_to_cpu(vid_hdr->hdr_crc));
-	dbg_msg("volume identifier header hexdump:");
+	pr_err("\thdr_crc   %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+	pr_err("Volume identifier header hexdump:\n");
+	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+		       vid_hdr, UBI_VID_HDR_SIZE, 1);
 }
 
 /**
- * ubi_dbg_dump_vol_info- dump volume information.
+ * ubi_dump_vol_info - dump volume information.
  * @vol: UBI volume description object
  */
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+void ubi_dump_vol_info(const struct ubi_volume *vol)
 {
-	dbg_msg("volume information dump:");
-	dbg_msg("vol_id          %d", vol->vol_id);
-	dbg_msg("reserved_pebs   %d", vol->reserved_pebs);
-	dbg_msg("alignment       %d", vol->alignment);
-	dbg_msg("data_pad        %d", vol->data_pad);
-	dbg_msg("vol_type        %d", vol->vol_type);
-	dbg_msg("name_len        %d", vol->name_len);
-	dbg_msg("usable_leb_size %d", vol->usable_leb_size);
-	dbg_msg("used_ebs        %d", vol->used_ebs);
-	dbg_msg("used_bytes      %lld", vol->used_bytes);
-	dbg_msg("last_eb_bytes   %d", vol->last_eb_bytes);
-	dbg_msg("corrupted       %d", vol->corrupted);
-	dbg_msg("upd_marker      %d", vol->upd_marker);
+	printf("Volume information dump:\n");
+	printf("\tvol_id          %d\n", vol->vol_id);
+	printf("\treserved_pebs   %d\n", vol->reserved_pebs);
+	printf("\talignment       %d\n", vol->alignment);
+	printf("\tdata_pad        %d\n", vol->data_pad);
+	printf("\tvol_type        %d\n", vol->vol_type);
+	printf("\tname_len        %d\n", vol->name_len);
+	printf("\tusable_leb_size %d\n", vol->usable_leb_size);
+	printf("\tused_ebs        %d\n", vol->used_ebs);
+	printf("\tused_bytes      %lld\n", vol->used_bytes);
+	printf("\tlast_eb_bytes   %d\n", vol->last_eb_bytes);
+	printf("\tcorrupted       %d\n", vol->corrupted);
+	printf("\tupd_marker      %d\n", vol->upd_marker);
 
 	if (vol->name_len <= UBI_VOL_NAME_MAX &&
 	    strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
-		dbg_msg("name            %s", vol->name);
+		printf("\tname            %s\n", vol->name);
 	} else {
-		dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c",
-			vol->name[0], vol->name[1], vol->name[2],
-			vol->name[3], vol->name[4]);
+		printf("\t1st 5 characters of name: %c%c%c%c%c\n",
+		       vol->name[0], vol->name[1], vol->name[2],
+		       vol->name[3], vol->name[4]);
 	}
 }
 
 /**
- * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
  * @r: the object to dump
  * @idx: volume table index
  */
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
 {
 	int name_len = be16_to_cpu(r->name_len);
 
-	dbg_msg("volume table record %d dump:", idx);
-	dbg_msg("reserved_pebs   %d", be32_to_cpu(r->reserved_pebs));
-	dbg_msg("alignment       %d", be32_to_cpu(r->alignment));
-	dbg_msg("data_pad        %d", be32_to_cpu(r->data_pad));
-	dbg_msg("vol_type        %d", (int)r->vol_type);
-	dbg_msg("upd_marker      %d", (int)r->upd_marker);
-	dbg_msg("name_len        %d", name_len);
+	pr_err("Volume table record %d dump:\n", idx);
+	pr_err("\treserved_pebs   %d\n", be32_to_cpu(r->reserved_pebs));
+	pr_err("\talignment       %d\n", be32_to_cpu(r->alignment));
+	pr_err("\tdata_pad        %d\n", be32_to_cpu(r->data_pad));
+	pr_err("\tvol_type        %d\n", (int)r->vol_type);
+	pr_err("\tupd_marker      %d\n", (int)r->upd_marker);
+	pr_err("\tname_len        %d\n", name_len);
 
 	if (r->name[0] == '\0') {
-		dbg_msg("name            NULL");
+		pr_err("\tname            NULL\n");
 		return;
 	}
 
 	if (name_len <= UBI_VOL_NAME_MAX &&
 	    strnlen(&r->name[0], name_len + 1) == name_len) {
-		dbg_msg("name            %s", &r->name[0]);
+		pr_err("\tname            %s\n", &r->name[0]);
 	} else {
-		dbg_msg("1st 5 characters of the name: %c%c%c%c%c",
+		pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
 			r->name[0], r->name[1], r->name[2], r->name[3],
 			r->name[4]);
 	}
-	dbg_msg("crc             %#08x", be32_to_cpu(r->crc));
+	pr_err("\tcrc             %#08x\n", be32_to_cpu(r->crc));
 }
 
 /**
- * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
- * @sv: the object to dump
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
  */
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+void ubi_dump_av(const struct ubi_ainf_volume *av)
 {
-	dbg_msg("volume scanning information dump:");
-	dbg_msg("vol_id         %d", sv->vol_id);
-	dbg_msg("highest_lnum   %d", sv->highest_lnum);
-	dbg_msg("leb_count      %d", sv->leb_count);
-	dbg_msg("compat         %d", sv->compat);
-	dbg_msg("vol_type       %d", sv->vol_type);
-	dbg_msg("used_ebs       %d", sv->used_ebs);
-	dbg_msg("last_data_size %d", sv->last_data_size);
-	dbg_msg("data_pad       %d", sv->data_pad);
+	pr_err("Volume attaching information dump:\n");
+	pr_err("\tvol_id         %d\n", av->vol_id);
+	pr_err("\thighest_lnum   %d\n", av->highest_lnum);
+	pr_err("\tleb_count      %d\n", av->leb_count);
+	pr_err("\tcompat         %d\n", av->compat);
+	pr_err("\tvol_type       %d\n", av->vol_type);
+	pr_err("\tused_ebs       %d\n", av->used_ebs);
+	pr_err("\tlast_data_size %d\n", av->last_data_size);
+	pr_err("\tdata_pad       %d\n", av->data_pad);
 }
 
 /**
- * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
- * @seb: the object to dump
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
  * @type: object type: 0 - not corrupted, 1 - corrupted
  */
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
 {
-	dbg_msg("eraseblock scanning information dump:");
-	dbg_msg("ec       %d", seb->ec);
-	dbg_msg("pnum     %d", seb->pnum);
+	pr_err("eraseblock attaching information dump:\n");
+	pr_err("\tec       %d\n", aeb->ec);
+	pr_err("\tpnum     %d\n", aeb->pnum);
 	if (type == 0) {
-		dbg_msg("lnum     %d", seb->lnum);
-		dbg_msg("scrub    %d", seb->scrub);
-		dbg_msg("sqnum    %llu", seb->sqnum);
-		dbg_msg("leb_ver  %u", seb->leb_ver);
+		pr_err("\tlnum     %d\n", aeb->lnum);
+		pr_err("\tscrub    %d\n", aeb->scrub);
+		pr_err("\tsqnum    %llu\n", aeb->sqnum);
 	}
 }
 
 /**
- * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
  * @req: the object to dump
  */
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
 {
 	char nm[17];
 
-	dbg_msg("volume creation request dump:");
-	dbg_msg("vol_id    %d",   req->vol_id);
-	dbg_msg("alignment %d",   req->alignment);
-	dbg_msg("bytes     %lld", (long long)req->bytes);
-	dbg_msg("vol_type  %d",   req->vol_type);
-	dbg_msg("name_len  %d",   req->name_len);
+	pr_err("Volume creation request dump:\n");
+	pr_err("\tvol_id    %d\n",   req->vol_id);
+	pr_err("\talignment %d\n",   req->alignment);
+	pr_err("\tbytes     %lld\n", (long long)req->bytes);
+	pr_err("\tvol_type  %d\n",   req->vol_type);
+	pr_err("\tname_len  %d\n",   req->name_len);
 
 	memcpy(nm, req->name, 16);
 	nm[16] = 0;
-	dbg_msg("the 1st 16 characters of the name: %s", nm);
+	pr_err("\t1st 16 characters of name: %s\n", nm);
 }
 
-#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
+#ifndef __UBOOT__
+/*
+ * Root directory for UBI stuff in debugfs. Contains sub-directories which
+ * contain the stuff specific to particular UBI devices.
+ */
+static struct dentry *dfs_rootdir;
+
+/**
+ * ubi_debugfs_init - create UBI debugfs directory.
+ *
+ * Create UBI debugfs directory. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_debugfs_init(void)
+{
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
+	dfs_rootdir = debugfs_create_dir("ubi", NULL);
+	if (IS_ERR_OR_NULL(dfs_rootdir)) {
+		int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+
+		ubi_err("cannot create \"ubi\" debugfs directory, error %d\n",
+			err);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * ubi_debugfs_exit - remove UBI debugfs directory.
+ */
+void ubi_debugfs_exit(void)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove(dfs_rootdir);
+}
+
+/* Read an UBI debugfs file */
+static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	unsigned long ubi_num = (unsigned long)file->private_data;
+	struct dentry *dent = file->f_path.dentry;
+	struct ubi_device *ubi;
+	struct ubi_debug_info *d;
+	char buf[3];
+	int val;
+
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
+		return -ENODEV;
+	d = &ubi->dbg;
+
+	if (dent == d->dfs_chk_gen)
+		val = d->chk_gen;
+	else if (dent == d->dfs_chk_io)
+		val = d->chk_io;
+	else if (dent == d->dfs_disable_bgt)
+		val = d->disable_bgt;
+	else if (dent == d->dfs_emulate_bitflips)
+		val = d->emulate_bitflips;
+	else if (dent == d->dfs_emulate_io_failures)
+		val = d->emulate_io_failures;
+	else {
+		count = -EINVAL;
+		goto out;
+	}
+
+	if (val)
+		buf[0] = '1';
+	else
+		buf[0] = '0';
+	buf[1] = '\n';
+	buf[2] = 0x00;
+
+	count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+
+out:
+	ubi_put_device(ubi);
+	return count;
+}
+
+/* Write an UBI debugfs file */
+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	unsigned long ubi_num = (unsigned long)file->private_data;
+	struct dentry *dent = file->f_path.dentry;
+	struct ubi_device *ubi;
+	struct ubi_debug_info *d;
+	size_t buf_size;
+	char buf[8];
+	int val;
+
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
+		return -ENODEV;
+	d = &ubi->dbg;
+
+	buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size)) {
+		count = -EFAULT;
+		goto out;
+	}
+
+	if (buf[0] == '1')
+		val = 1;
+	else if (buf[0] == '0')
+		val = 0;
+	else {
+		count = -EINVAL;
+		goto out;
+	}
+
+	if (dent == d->dfs_chk_gen)
+		d->chk_gen = val;
+	else if (dent == d->dfs_chk_io)
+		d->chk_io = val;
+	else if (dent == d->dfs_disable_bgt)
+		d->disable_bgt = val;
+	else if (dent == d->dfs_emulate_bitflips)
+		d->emulate_bitflips = val;
+	else if (dent == d->dfs_emulate_io_failures)
+		d->emulate_io_failures = val;
+	else
+		count = -EINVAL;
+
+out:
+	ubi_put_device(ubi);
+	return count;
+}
+
+/* File operations for all UBI debugfs files */
+static const struct file_operations dfs_fops = {
+	.read   = dfs_file_read,
+	.write  = dfs_file_write,
+	.open	= simple_open,
+	.llseek = no_llseek,
+	.owner  = THIS_MODULE,
+};
+
+/**
+ * ubi_debugfs_init_dev - initialize debugfs for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function creates all debugfs files for UBI device @ubi. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+	int err, n;
+	unsigned long ubi_num = ubi->ubi_num;
+	const char *fname;
+	struct dentry *dent;
+	struct ubi_debug_info *d = &ubi->dbg;
+
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
+	n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+		     ubi->ubi_num);
+	if (n == UBI_DFS_DIR_LEN) {
+		/* The array size is too small */
+		fname = UBI_DFS_DIR_NAME;
+		dent = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	fname = d->dfs_dir_name;
+	dent = debugfs_create_dir(fname, dfs_rootdir);
+	if (IS_ERR_OR_NULL(dent))
+		goto out;
+	d->dfs_dir = dent;
+
+	fname = "chk_gen";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_chk_gen = dent;
+
+	fname = "chk_io";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_chk_io = dent;
+
+	fname = "tst_disable_bgt";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_disable_bgt = dent;
+
+	fname = "tst_emulate_bitflips";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_emulate_bitflips = dent;
+
+	fname = "tst_emulate_io_failures";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_emulate_io_failures = dent;
+
+	return 0;
+
+out_remove:
+	debugfs_remove_recursive(d->dfs_dir);
+out:
+	err = dent ? PTR_ERR(dent) : -ENODEV;
+	ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+		fname, err);
+	return err;
+}
+
+/**
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
+ * @ubi: UBI device description object
+ */
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove_recursive(ubi->dbg.dfs_dir);
+}
+#else
+int ubi_debugfs_init(void)
+{
+	return 0;
+}
+
+void ubi_debugfs_exit(void)
+{
+}
+
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+	return 0;
+}
+
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+}
+#endif
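
The debugfs read path above leans on simple_read_from_buffer(), which clamps
the caller's request against what remains past *ppos and advances the file
position. A rough, self-contained userspace approximation of those semantics
(function and variable names here are made up for illustration):

#include <stdio.h>
#include <string.h>

/* Copy min(count, len - *ppos) bytes out of buf and advance *ppos. */
static long read_from_buffer(char *to, size_t count, long *ppos,
			     const char *buf, size_t len)
{
	size_t avail;

	if (*ppos < 0 || (size_t)*ppos >= len)
		return 0;	/* nothing left: EOF */
	avail = len - (size_t)*ppos;
	if (count > avail)
		count = avail;
	memcpy(to, buf + *ppos, count);
	*ppos += (long)count;
	return (long)count;
}

int main(void)
{
	char out[8];
	long pos = 0;
	long n = read_from_buffer(out, sizeof(out), &pos, "1\n", 2);

	printf("read %ld bytes, pos now %ld\n", n, pos);	/* 2, 2 */
	return 0;
}

A second read with the same pos returns 0, which is why reading one of these
debugfs files terminates after the single "0\n" or "1\n" line.
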
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 222b2b8..980eb11 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -9,132 +9,113 @@
 #ifndef __UBI_DEBUG_H__
 #define __UBI_DEBUG_H__
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-#ifdef UBI_LINUX
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/random.h>
 #endif
 
-#define ubi_assert(expr)  BUG_ON(!(expr))
-#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
-#else
-#define ubi_assert(expr)  ({})
-#define dbg_err(fmt, ...) ({})
-#endif
+#define ubi_assert(expr)  do {                                               \
+	if (unlikely(!(expr))) {                                             \
+		pr_crit("UBI assert failed in %s at %u (pid %d)\n",          \
+		       __func__, __LINE__, current->pid);                    \
+		dump_stack();                                                \
+	}                                                                    \
+} while (0)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
-#define DBG_DISABLE_BGT 1
-#else
-#define DBG_DISABLE_BGT 0
-#endif
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a)                   \
+		print_hex_dump(l, ps, pt, r, g, b, len, a)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG
-/* Generic debugging message */
-#define dbg_msg(fmt, ...)                                    \
-	printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", \
-	       __FUNCTION__, ##__VA_ARGS__)
+#define ubi_dbg_msg(type, fmt, ...) \
+	pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid,       \
+		 ##__VA_ARGS__)
 
-#define ubi_dbg_dump_stack() dump_stack()
-
-struct ubi_ec_hdr;
-struct ubi_vid_hdr;
-struct ubi_volume;
-struct ubi_vtbl_record;
-struct ubi_scan_volume;
-struct ubi_scan_leb;
-struct ubi_mkvol_req;
-
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
-
-#else
-
-#define dbg_msg(fmt, ...)    ({})
-#define ubi_dbg_dump_stack() ({})
-#define ubi_dbg_dump_ec_hdr(ec_hdr)      ({})
-#define ubi_dbg_dump_vid_hdr(vid_hdr)    ({})
-#define ubi_dbg_dump_vol_info(vol)       ({})
-#define ubi_dbg_dump_vtbl_record(r, idx) ({})
-#define ubi_dbg_dump_sv(sv)              ({})
-#define ubi_dbg_dump_seb(seb, type)      ({})
-#define ubi_dbg_dump_mkvol_req(req)      ({})
-
-#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
-/* Messages from the eraseblock association unit */
-#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_eba(fmt, ...) ({})
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
-/* Messages from the wear-leveling unit */
-#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_wl(fmt, ...) ({})
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
-/* Messages from the input/output unit */
-#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_io(fmt, ...) ({})
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
+/* General debugging messages */
+#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+/* Messages from the eraseblock association sub-system */
+#define dbg_eba(fmt, ...) ubi_dbg_msg("eba", fmt, ##__VA_ARGS__)
+/* Messages from the wear-leveling sub-system */
+#define dbg_wl(fmt, ...)  ubi_dbg_msg("wl", fmt, ##__VA_ARGS__)
+/* Messages from the input/output sub-system */
+#define dbg_io(fmt, ...)  ubi_dbg_msg("io", fmt, ##__VA_ARGS__)
 /* Initialization and build messages */
-#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_bld(fmt, ...) ({})
-#endif
+#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+			  int len);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+void ubi_debugfs_exit_dev(struct ubi_device *ubi);
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+	return ubi->dbg.disable_bgt;
+}
+
 /**
  * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
  *
  * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
  */
-static inline int ubi_dbg_is_bitflip(void)
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
 {
-	return !(random32() % 200);
+	if (ubi->dbg.emulate_bitflips)
+		return !(prandom_u32() % 200);
+	return 0;
 }
-#else
-#define ubi_dbg_is_bitflip() 0
-#endif
 
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
 /**
  * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
  *
  * Returns non-zero if a write failure should be emulated, otherwise returns
  * zero.
  */
-static inline int ubi_dbg_is_write_failure(void)
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
 {
-	return !(random32() % 500);
+	if (ubi->dbg.emulate_io_failures)
+		return !(prandom_u32() % 500);
+	return 0;
 }
-#else
-#define ubi_dbg_is_write_failure() 0
-#endif
 
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
 /**
  * ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
+ * @ubi: UBI device description object
  *
  * Returns non-zero if an erase failure should be emulated, otherwise returns
  * zero.
  */
-static inline int ubi_dbg_is_erase_failure(void)
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 {
-		return !(random32() % 400);
+	if (ubi->dbg.emulate_io_failures)
+		return !(prandom_u32() % 400);
+	return 0;
 }
-#else
-#define ubi_dbg_is_erase_failure() 0
-#endif
 
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+	return ubi->dbg.chk_io;
+}
+
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+	return ubi->dbg.chk_gen;
+}
 #endif /* !__UBI_DEBUG_H__ */
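
On the emulation helpers above: prandom_u32() % 200 is zero roughly once per
200 draws, so ubi_dbg_is_bitflip() fires with probability ~0.5%, and the
write/erase failure variants with ~0.2% (1/500) and ~0.25% (1/400). A quick
userspace sanity check of that arithmetic, using rand() as a stand-in for the
kernel PRNG (sketch only):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int i, hits = 0, trials = 1000000;

	srand(1);
	for (i = 0; i < trials; i++)
		if (!(rand() % 200))	/* same test as ubi_dbg_is_bitflip() */
			hits++;

	printf("emulated bit-flip rate: %f (expect ~0.005)\n",
	       (double)hits / trials);
	return 0;
}
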
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 7d27eda..3c2a7e6 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -7,20 +7,20 @@
  */
 
 /*
- * The UBI Eraseblock Association (EBA) unit.
+ * The UBI Eraseblock Association (EBA) sub-system.
  *
- * This unit is responsible for I/O to/from logical eraseblock.
+ * This sub-system is responsible for I/O to/from logical eraseblock.
  *
  * Although in this implementation the EBA table is fully kept and managed in
  * RAM, which assumes poor scalability, it might be (partially) maintained on
  * flash in future implementations.
  *
- * The EBA unit implements per-logical eraseblock locking. Before accessing a
- * logical eraseblock it is locked for reading or writing. The per-logical
- * eraseblock locking is implemented by means of the lock tree. The lock tree
- * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
- * (@vol_id, @lnum) pairs.
+ * The EBA sub-system implements per-logical eraseblock locking. Before
+ * accessing a logical eraseblock it is locked for reading or writing. The
+ * per-logical eraseblock locking is implemented by means of the lock tree. The
+ * lock tree is an RB-tree which refers all the currently locked logical
+ * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
+ * They are indexed by (@vol_id, @lnum) pairs.
  *
  * EBA also maintains the global sequence counter which is incremented each
  * time a logical eraseblock is mapped to a physical eraseblock and it is
@@ -29,13 +29,15 @@
  * 64 bits is enough to never overflow.
  */
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/slab.h>
 #include <linux/crc32.h>
-#include <linux/err.h>
+#else
+#include <ubi_uboot.h>
 #endif
 
-#include <ubi_uboot.h>
+#include <linux/err.h>
 #include "ubi.h"
 
 /* Number of physical eraseblocks reserved for atomic LEB change operation */
@@ -49,7 +51,7 @@
  * global sequence counter value. It also increases the global sequence
  * counter.
  */
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
 {
 	unsigned long long sqnum;
 
@@ -181,9 +183,7 @@
 	le->users += 1;
 	spin_unlock(&ubi->ltree_lock);
 
-	if (le_free)
-		kfree(le_free);
-
+	kfree(le_free);
 	return le;
 }
 
@@ -215,22 +215,18 @@
  */
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	int _free = 0;
 	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
 	le->users -= 1;
 	ubi_assert(le->users >= 0);
+	up_read(&le->mutex);
 	if (le->users == 0) {
 		rb_erase(&le->rb, &ubi->ltree);
-		_free = 1;
+		kfree(le);
 	}
 	spin_unlock(&ubi->ltree_lock);
-
-	up_read(&le->mutex);
-	if (_free)
-		kfree(le);
 }
 
 /**
@@ -266,7 +262,6 @@
  */
 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	int _free;
 	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
@@ -281,12 +276,9 @@
 	ubi_assert(le->users >= 0);
 	if (le->users == 0) {
 		rb_erase(&le->rb, &ubi->ltree);
-		_free = 1;
-	} else
-		_free = 0;
-	spin_unlock(&ubi->ltree_lock);
-	if (_free)
 		kfree(le);
+	}
+	spin_unlock(&ubi->ltree_lock);
 
 	return 1;
 }
@@ -299,23 +291,18 @@
  */
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	int _free;
 	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
 	le->users -= 1;
 	ubi_assert(le->users >= 0);
+	up_write(&le->mutex);
 	if (le->users == 0) {
 		rb_erase(&le->rb, &ubi->ltree);
-		_free = 1;
-	} else
-		_free = 0;
-	spin_unlock(&ubi->ltree_lock);
-
-	up_write(&le->mutex);
-	if (_free)
 		kfree(le);
+	}
+	spin_unlock(&ubi->ltree_lock);
 }
 
 /**
@@ -347,8 +334,10 @@
 
 	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
-	err = ubi_wl_put_peb(ubi, pnum, 0);
+	up_read(&ubi->fm_sem);
+	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 
 out_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
@@ -425,9 +414,10 @@
 				 * may try to recover data. FIXME: but this is
 				 * not implemented.
 				 */
-				if (err == UBI_IO_BAD_VID_HDR) {
-					ubi_warn("bad VID header at PEB %d, LEB"
-						 "%d:%d", pnum, vol_id, lnum);
+				if (err == UBI_IO_BAD_HDR_EBADMSG ||
+				    err == UBI_IO_BAD_HDR) {
+					ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+						 pnum, vol_id, lnum);
 					err = -EBADMSG;
 				} else
 					ubi_ro_mode(ubi);
@@ -508,16 +498,12 @@
 	struct ubi_vid_hdr *vid_hdr;
 
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr) {
+	if (!vid_hdr)
 		return -ENOMEM;
-	}
-
-	mutex_lock(&ubi->buf_mutex);
 
 retry:
-	new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+	new_pnum = ubi_wl_get_peb(ubi);
 	if (new_pnum < 0) {
-		mutex_unlock(&ubi->buf_mutex);
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		return new_pnum;
 	}
@@ -531,39 +517,45 @@
 		goto out_put;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
 	if (err)
 		goto write_error;
 
 	data_size = offset + len;
-	memset(ubi->peb_buf1 + offset, 0xFF, len);
+	mutex_lock(&ubi->buf_mutex);
+	memset(ubi->peb_buf + offset, 0xFF, len);
 
 	/* Read everything before the area where the write failure happened */
 	if (offset > 0) {
-		err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
+		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
 		if (err && err != UBI_IO_BITFLIPS)
-			goto out_put;
+			goto out_unlock;
 	}
 
-	memcpy(ubi->peb_buf1 + offset, buf, len);
+	memcpy(ubi->peb_buf + offset, buf, len);
 
-	err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
-	if (err)
+	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
+	if (err) {
+		mutex_unlock(&ubi->buf_mutex);
 		goto write_error;
+	}
 
 	mutex_unlock(&ubi->buf_mutex);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = new_pnum;
-	ubi_wl_put_peb(ubi, pnum, 1);
+	up_read(&ubi->fm_sem);
+	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 
 	ubi_msg("data was successfully recovered");
 	return 0;
 
-out_put:
+out_unlock:
 	mutex_unlock(&ubi->buf_mutex);
-	ubi_wl_put_peb(ubi, new_pnum, 1);
+out_put:
+	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
 
@@ -573,9 +565,8 @@
 	 * get another one.
 	 */
 	ubi_warn("failed to write to PEB %d", new_pnum);
-	ubi_wl_put_peb(ubi, new_pnum, 1);
+	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
 	if (++tries > UBI_IO_RETRIES) {
-		mutex_unlock(&ubi->buf_mutex);
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		return err;
 	}
@@ -591,7 +582,6 @@
  * @buf: the data to write
  * @offset: offset within the logical eraseblock where to write
  * @len: how many bytes to write
- * @dtype: data type
  *
  * This function writes data to logical eraseblock @lnum of a dynamic volume
  * @vol. Returns zero in case of success and a negative error code in case
@@ -599,7 +589,7 @@
  * written to the flash media, but may be some garbage.
  */
 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
-		      const void *buf, int offset, int len, int dtype)
+		      const void *buf, int offset, int len)
 {
 	int err, pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
@@ -640,14 +630,14 @@
 	}
 
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
 
 retry:
-	pnum = ubi_wl_get_peb(ubi, dtype);
+	pnum = ubi_wl_get_peb(ubi);
 	if (pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		leb_write_unlock(ubi, vol_id, lnum);
@@ -667,14 +657,15 @@
 	if (len) {
 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
 		if (err) {
-			ubi_warn("failed to write %d bytes at offset %d of "
-				 "LEB %d:%d, PEB %d", len, offset, vol_id,
-				 lnum, pnum);
+			ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+				 len, offset, vol_id, lnum, pnum);
 			goto write_error;
 		}
 	}
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -693,7 +684,7 @@
 	 * eraseblock, so just put it and request a new one. We assume that if
 	 * this physical eraseblock went bad, the erase code will handle that.
 	 */
-	err = ubi_wl_put_peb(ubi, pnum, 1);
+	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 	if (err || ++tries > UBI_IO_RETRIES) {
 		ubi_ro_mode(ubi);
 		leb_write_unlock(ubi, vol_id, lnum);
@@ -701,7 +692,7 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -713,7 +704,6 @@
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
- * @dtype: data type
  * @used_ebs: how many logical eraseblocks will this volume contain
  *
  * This function writes data to logical eraseblock @lnum of static volume
@@ -725,13 +715,12 @@
  * to the real data size, although the @buf buffer has to contain the
  * alignment. In all other cases, @len has to be aligned.
  *
- * It is prohibited to write more then once to logical eraseblocks of static
+ * It is prohibited to write more than once to logical eraseblocks of static
  * volumes. This function returns zero in case of success and a negative error
  * code in case of failure.
  */
 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
-			 int lnum, const void *buf, int len, int dtype,
-			 int used_ebs)
+			 int lnum, const void *buf, int len, int used_ebs)
 {
 	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
@@ -756,7 +745,7 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -769,7 +758,7 @@
 	vid_hdr->data_crc = cpu_to_be32(crc);
 
 retry:
-	pnum = ubi_wl_get_peb(ubi, dtype);
+	pnum = ubi_wl_get_peb(ubi);
 	if (pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		leb_write_unlock(ubi, vol_id, lnum);
@@ -794,7 +783,9 @@
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] < 0);
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -813,7 +804,7 @@
 		return err;
 	}
 
-	err = ubi_wl_put_peb(ubi, pnum, 1);
+	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 	if (err || ++tries > UBI_IO_RETRIES) {
 		ubi_ro_mode(ubi);
 		leb_write_unlock(ubi, vol_id, lnum);
@@ -821,7 +812,7 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -833,7 +824,6 @@
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
- * @dtype: data type
  *
  * This function changes the contents of a logical eraseblock atomically. @buf
  * has to contain new logical eraseblock data, and @len - the length of the
@@ -845,7 +835,7 @@
  * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
-			      int lnum, const void *buf, int len, int dtype)
+			      int lnum, const void *buf, int len)
 {
 	int err, pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
@@ -862,7 +852,7 @@
 		err = ubi_eba_unmap_leb(ubi, vol, lnum);
 		if (err)
 			return err;
-		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
 	}
 
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -874,7 +864,7 @@
 	if (err)
 		goto out_mutex;
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -887,7 +877,7 @@
 	vid_hdr->data_crc = cpu_to_be32(crc);
 
 retry:
-	pnum = ubi_wl_get_peb(ubi, dtype);
+	pnum = ubi_wl_get_peb(ubi);
 	if (pnum < 0) {
 		err = pnum;
 		goto out_leb_unlock;
@@ -911,12 +901,14 @@
 	}
 
 	if (vol->eba_tbl[lnum] >= 0) {
-		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
+		err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
 		if (err)
 			goto out_leb_unlock;
 	}
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_sem);
 
 out_leb_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
@@ -936,18 +928,45 @@
 		goto out_leb_unlock;
 	}
 
-	err = ubi_wl_put_peb(ubi, pnum, 1);
+	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 	if (err || ++tries > UBI_IO_RETRIES) {
 		ubi_ro_mode(ubi);
 		goto out_leb_unlock;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
 
 /**
+ * is_error_sane - check whether a read error is sane.
+ * @err: code of the error happened during reading
+ *
+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
+ * cannot read data from the target PEB (an error @err happened). If the error
+ * code is sane, then we treat this error as non-fatal. Otherwise the error is
+ * fatal and UBI will be switched to R/O mode later.
+ *
+ * The idea is that we try not to switch to R/O mode if the read error is
+ * something which suggests there was a real read problem. E.g., %-EIO. Or a
+ * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
+ * mode, simply because we do not know what happened at the MTD level, and we
+ * cannot handle this. E.g., the underlying driver may have become crazy, and
+ * it is safer to switch to R/O mode to preserve the data.
+ *
+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB
+ * which we have just written.
+ */
+static int is_error_sane(int err)
+{
+	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
+	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
+		return 0;
+	return 1;
+}
+
+/**
  * ubi_eba_copy_leb - copy logical eraseblock.
  * @ubi: UBI device description object
  * @from: physical eraseblock number from where to copy
@@ -957,10 +976,9 @@
  * This function copies logical eraseblock from physical eraseblock @from to
  * physical eraseblock @to. The @vid_hdr buffer may be changed by this
  * function. Returns:
- *   o %0  in case of success;
- *   o %1 if the operation was canceled and should be tried later (e.g.,
- *     because a bit-flip was detected at the target PEB);
- *   o %2 if the volume is being deleted and this LEB should not be moved.
+ *   o %0 in case of success;
+ *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
+ *   o a negative error code in case of failure.
  */
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		     struct ubi_vid_hdr *vid_hdr)
@@ -972,7 +990,7 @@
 	vol_id = be32_to_cpu(vid_hdr->vol_id);
 	lnum = be32_to_cpu(vid_hdr->lnum);
 
-	dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
 
 	if (vid_hdr->vol_type == UBI_VID_STATIC) {
 		data_size = be32_to_cpu(vid_hdr->data_size);
@@ -986,17 +1004,16 @@
 	/*
 	 * Note, we may race with volume deletion, which means that the volume
 	 * this logical eraseblock belongs to might be being deleted. Since the
-	 * volume deletion unmaps all the volume's logical eraseblocks, it will
+	 * volume deletion un-maps all the volume's logical eraseblocks, it will
 	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
 	 */
 	vol = ubi->volumes[idx];
+	spin_unlock(&ubi->volumes_lock);
 	if (!vol) {
 		/* No need to do further work, cancel */
-		dbg_eba("volume %d is being removed, cancel", vol_id);
-		spin_unlock(&ubi->volumes_lock);
-		return 2;
+		dbg_wl("volume %d is being removed, cancel", vol_id);
+		return MOVE_CANCEL_RACE;
 	}
-	spin_unlock(&ubi->volumes_lock);
 
 	/*
 	 * We do not want anybody to write to this logical eraseblock while we
@@ -1008,12 +1025,15 @@
 	 * (@from). This task locks the LEB and goes sleep in the
 	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
 	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
-	 * LEB is already locked, we just do not move it and return %1.
+	 * LEB is already locked, we just do not move it and return
+	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+	 * we do not know the reasons of the contention - it may be just a
+	 * normal I/O on this LEB, so we want to re-try.
 	 */
 	err = leb_write_trylock(ubi, vol_id, lnum);
 	if (err) {
-		dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
-		return err;
+		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+		return MOVE_RETRY;
 	}
 
 	/*
@@ -1022,30 +1042,30 @@
 	 * cancel it.
 	 */
 	if (vol->eba_tbl[lnum] != from) {
-		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
-			"PEB %d, cancel", vol_id, lnum, from,
-			vol->eba_tbl[lnum]);
-		err = 1;
+		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+		       vol_id, lnum, from, vol->eba_tbl[lnum]);
+		err = MOVE_CANCEL_RACE;
 		goto out_unlock_leb;
 	}
 
 	/*
-	 * OK, now the LEB is locked and we can safely start moving iy. Since
-	 * this function utilizes thie @ubi->peb1_buf buffer which is shared
-	 * with some other functions, so lock the buffer by taking the
+	 * OK, now the LEB is locked and we can safely start moving it. Since
+	 * this function utilizes the @ubi->peb_buf buffer which is shared
+	 * with some other functions - we lock the buffer by taking the
 	 * @ubi->buf_mutex.
 	 */
 	mutex_lock(&ubi->buf_mutex);
-	dbg_eba("read %d bytes of data", aldata_size);
-	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
+	dbg_wl("read %d bytes of data", aldata_size);
+	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
 	if (err && err != UBI_IO_BITFLIPS) {
 		ubi_warn("error %d while reading data from PEB %d",
 			 err, from);
+		err = MOVE_SOURCE_RD_ERR;
 		goto out_unlock_buf;
 	}
 
 	/*
-	 * Now we have got to calculate how much data we have to to copy. In
+	 * Now we have got to calculate how much data we have to copy. In
 	 * case of a static volume it is fairly easy - the VID header contains
 	 * the data size. In case of a dynamic volume it is more difficult - we
 	 * have to read the contents, cut 0xFF bytes from the end and copy only
@@ -1056,14 +1076,14 @@
 	 */
 	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
 		aldata_size = data_size =
-			ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
+			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
 
 	cond_resched();
-	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
+	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
 	cond_resched();
 
 	/*
-	 * It may turn out to me that the whole @from physical eraseblock
+	 * It may turn out to be that the whole @from physical eraseblock
 	 * contains only 0xFF bytes. Then we have to only write the VID header
 	 * and do not write any data. This also means we should not set
 	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
@@ -1073,28 +1093,37 @@
 		vid_hdr->data_size = cpu_to_be32(data_size);
 		vid_hdr->data_crc = cpu_to_be32(crc);
 	}
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 
 	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
-	if (err)
+	if (err) {
+		if (err == -EIO)
+			err = MOVE_TARGET_WR_ERR;
 		goto out_unlock_buf;
+	}
 
 	cond_resched();
 
 	/* Read the VID header back and check if it was written correctly */
 	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
 	if (err) {
-		if (err != UBI_IO_BITFLIPS)
-			ubi_warn("cannot read VID header back from PEB %d", to);
-		else
-			err = 1;
+		if (err != UBI_IO_BITFLIPS) {
+			ubi_warn("error %d while reading VID header back from PEB %d",
+				 err, to);
+			if (is_error_sane(err))
+				err = MOVE_TARGET_RD_ERR;
+		} else
+			err = MOVE_TARGET_BITFLIPS;
 		goto out_unlock_buf;
 	}
 
 	if (data_size > 0) {
-		err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
-		if (err)
+		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
+		if (err) {
+			if (err == -EIO)
+				err = MOVE_TARGET_WR_ERR;
 			goto out_unlock_buf;
+		}
 
 		cond_resched();
 
@@ -1102,28 +1131,33 @@
 		 * We've written the data and are going to read it back to make
 		 * sure it was written correctly.
 		 */
-
-		err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
+		memset(ubi->peb_buf, 0xFF, aldata_size);
+		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
 		if (err) {
-			if (err != UBI_IO_BITFLIPS)
-				ubi_warn("cannot read data back from PEB %d",
-					 to);
-			else
-				err = 1;
+			if (err != UBI_IO_BITFLIPS) {
+				ubi_warn("error %d while reading data back from PEB %d",
+					 err, to);
+				if (is_error_sane(err))
+					err = MOVE_TARGET_RD_ERR;
+			} else
+				err = MOVE_TARGET_BITFLIPS;
 			goto out_unlock_buf;
 		}
 
 		cond_resched();
 
-		if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
-			ubi_warn("read data back from PEB %d - it is different",
+		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
+			ubi_warn("read data back from PEB %d and it is different",
 				 to);
+			err = -EINVAL;
 			goto out_unlock_buf;
 		}
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] == from);
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = to;
+	up_read(&ubi->fm_sem);
 
 out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
@@ -1133,28 +1167,165 @@
 }
 
 /**
- * ubi_eba_init_scan - initialize the EBA unit using scanning information.
+ * print_rsvd_warning - warn about not having enough reserved PEBs.
  * @ubi: UBI device description object
- * @si: scanning information
+ *
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
+ * cannot reserve enough PEBs for bad block handling. This function makes a
+ * decision whether we have to print a warning or not. The algorithm is as
+ * follows:
+ *   o if this is a new UBI image, then just print the warning
+ *   o if this is an UBI image which has already been used for some time, print
+ *     a warning only if we can reserve less than 10% of the expected amount of
+ *     the reserved PEB.
+ *
+ * The idea is that when UBI is used, PEBs become bad, and the reserved pool
+ * of PEBs becomes smaller, which is normal and we do not want to scare users
+ * with a warning every time they attach the MTD device. This was an issue
+ * reported by real users.
+ */
+static void print_rsvd_warning(struct ubi_device *ubi,
+			       struct ubi_attach_info *ai)
+{
+	/*
+	 * The 1 << 18 (256KiB) number is picked randomly, just a reasonably
+	 * large number to distinguish between newly flashed and used images.
+	 */
+	if (ai->max_sqnum > (1 << 18)) {
+		int min = ubi->beb_rsvd_level / 10;
+
+		if (!min)
+			min = 1;
+		if (ubi->beb_rsvd_pebs > min)
+			return;
+	}
+
+	ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+	if (ubi->corr_peb_count)
+		ubi_warn("%d PEBs are corrupted and not used",
+			 ubi->corr_peb_count);
+}
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+		   struct ubi_attach_info *ai_scan)
+{
+	int i, j, num_volumes, ret = 0;
+	int **scan_eba, **fm_eba;
+	struct ubi_ainf_volume *av;
+	struct ubi_volume *vol;
+	struct ubi_ainf_peb *aeb;
+	struct rb_node *rb;
+
+	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+	if (!scan_eba)
+		return -ENOMEM;
+
+	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+	if (!fm_eba) {
+		kfree(scan_eba);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_volumes; i++) {
+		vol = ubi->volumes[i];
+		if (!vol)
+			continue;
+
+		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+				      GFP_KERNEL);
+		if (!scan_eba[i]) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+				    GFP_KERNEL);
+		if (!fm_eba[i]) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		for (j = 0; j < vol->reserved_pebs; j++)
+			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+		if (!av)
+			continue;
+
+		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+			scan_eba[i][aeb->lnum] = aeb->pnum;
+
+		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+		if (!av)
+			continue;
+
+		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+			fm_eba[i][aeb->lnum] = aeb->pnum;
+
+		for (j = 0; j < vol->reserved_pebs; j++) {
+			if (scan_eba[i][j] != fm_eba[i][j]) {
+				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+					fm_eba[i][j] == UBI_LEB_UNMAPPED)
+					continue;
+
+				ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+					vol->vol_id, j, fm_eba[i][j],
+					scan_eba[i][j]);
+				ubi_assert(0);
+			}
+		}
+	}
+
+out_free:
+	for (i = 0; i < num_volumes; i++) {
+		if (!ubi->volumes[i])
+			continue;
+
+		kfree(scan_eba[i]);
+		kfree(fm_eba[i]);
+	}
+
+	kfree(scan_eba);
+	kfree(fm_eba);
+	return ret;
+}
+
+/**
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
 	int i, j, err, num_volumes;
-	struct ubi_scan_volume *sv;
+	struct ubi_ainf_volume *av;
 	struct ubi_volume *vol;
-	struct ubi_scan_leb *seb;
+	struct ubi_ainf_peb *aeb;
 	struct rb_node *rb;
 
-	dbg_eba("initialize EBA unit");
+	dbg_eba("initialize EBA sub-system");
 
 	spin_lock_init(&ubi->ltree_lock);
 	mutex_init(&ubi->alc_mutex);
 	ubi->ltree = RB_ROOT;
 
-	ubi->global_sqnum = si->max_sqnum + 1;
+	ubi->global_sqnum = ai->max_sqnum + 1;
 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
 	for (i = 0; i < num_volumes; i++) {
@@ -1174,24 +1345,27 @@
 		for (j = 0; j < vol->reserved_pebs; j++)
 			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
 
-		sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
-		if (!sv)
+		av = ubi_find_av(ai, idx2vol_id(ubi, i));
+		if (!av)
 			continue;
 
-		ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-			if (seb->lnum >= vol->reserved_pebs)
+		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+			if (aeb->lnum >= vol->reserved_pebs)
 				/*
 				 * This may happen in case of an unclean reboot
 				 * during re-size.
 				 */
-				ubi_scan_move_to_list(sv, seb, &si->erase);
-			vol->eba_tbl[seb->lnum] = seb->pnum;
+				ubi_move_aeb_to_list(av, aeb, &ai->erase);
+			vol->eba_tbl[aeb->lnum] = aeb->pnum;
 		}
 	}
 
 	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
 		ubi_err("no enough physical eraseblocks (%d, need %d)",
 			ubi->avail_pebs, EBA_RESERVED_PEBS);
+		if (ubi->corr_peb_count)
+			ubi_err("%d PEBs are corrupted and not used",
+				ubi->corr_peb_count);
 		err = -ENOSPC;
 		goto out_free;
 	}
@@ -1204,9 +1378,7 @@
 		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
 			/* No enough free physical eraseblocks */
 			ubi->beb_rsvd_pebs = ubi->avail_pebs;
-			ubi_warn("cannot reserve enough PEBs for bad PEB "
-				 "handling, reserved %d, need %d",
-				 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+			print_rsvd_warning(ubi, ai);
 		} else
 			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
 
@@ -1214,7 +1386,7 @@
 		ubi->rsvd_pebs  += ubi->beb_rsvd_pebs;
 	}
 
-	dbg_eba("EBA unit is initialized");
+	dbg_eba("EBA sub-system is initialized");
 	return 0;
 
 out_free:
@@ -1222,23 +1394,7 @@
 		if (!ubi->volumes[i])
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
+		ubi->volumes[i]->eba_tbl = NULL;
 	}
 	return err;
 }
-
-/**
- * ubi_eba_close - close EBA unit.
- * @ubi: UBI device description object
- */
-void ubi_eba_close(const struct ubi_device *ubi)
-{
-	int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
-
-	dbg_eba("close EBA unit");
-
-	for (i = 0; i < num_volumes; i++) {
-		if (!ubi->volumes[i])
-			continue;
-		kfree(ubi->volumes[i]->eba_tbl);
-	}
-}
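
All the eba.c write paths stamp fresh VID headers with ubi_next_sqnum(), a
64-bit counter incremented under the @ubi->ltree_lock spinlock so that every
LEB-to-PEB mapping event carries a unique, monotonically increasing sequence
number. A self-contained sketch of that pattern, with a pthread mutex
standing in for the kernel spinlock (illustrative only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t sqnum_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t global_sqnum;

/* Hand out the next sequence number; 64 bits never realistically wraps. */
static uint64_t next_sqnum(void)
{
	uint64_t sqnum;

	pthread_mutex_lock(&sqnum_lock);
	sqnum = global_sqnum++;
	pthread_mutex_unlock(&sqnum_lock);
	return sqnum;
}

int main(void)
{
	printf("sqnum %llu, then %llu\n",
	       (unsigned long long)next_sqnum(),
	       (unsigned long long)next_sqnum());
	return 0;
}
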
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 0000000..787522f
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1584 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ *
+ */
+
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/crc32.h>
+#else
+#include <div64.h>
+#include <malloc.h>
+#include <ubi_uboot.h>
+#endif
+
+#include <linux/compat.h>
+#include <linux/math64.h>
+#include "ubi.h"
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+	size_t size;
+
+	size = sizeof(struct ubi_fm_hdr) + \
+		sizeof(struct ubi_fm_scan_pool) + \
+		sizeof(struct ubi_fm_scan_pool) + \
+		(ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
+		(sizeof(struct ubi_fm_eba) + \
+		(ubi->peb_count * sizeof(__be32))) + \
+		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+	return roundup(size, ubi->leb_size);
+}
+
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+	struct ubi_vid_hdr *new;
+
+	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!new)
+		goto out;
+
+	new->vol_type = UBI_VID_DYNAMIC;
+	new->vol_id = cpu_to_be32(vol_id);
+
+	/* UBI implementations without fastmap support have to delete the
+	 * fastmap.
+	 */
+	new->compat = UBI_COMPAT_DELETE;
+
+out:
+	return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new PEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+		   int pnum, int ec, int scrub)
+{
+	struct ubi_ainf_peb *aeb;
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	aeb->pnum = pnum;
+	aeb->ec = ec;
+	aeb->lnum = -1;
+	aeb->scrub = scrub;
+	aeb->copy_flag = aeb->sqnum = 0;
+
+	ai->ec_sum += aeb->ec;
+	ai->ec_count++;
+
+	if (ai->max_ec < aeb->ec)
+		ai->max_ec = aeb->ec;
+
+	if (ai->min_ec > aeb->ec)
+		ai->min_ec = aeb->ec;
+
+	list_add_tail(&aeb->u.list, list);
+
+	return 0;
+}
+
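+/*
+ * Note: the ec_sum/ec_count/min_ec/max_ec bookkeeping above lets
+ * ubi_attach_fastmap() derive ai->mean_ec without a full media scan; UBI
+ * uses the mean, for instance, as a substitute erase counter when a PEB's
+ * EC header could not be read.
+ */
+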
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBs
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+				       int used_ebs, int data_pad, u8 vol_type,
+				       int last_eb_bytes)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	while (*p) {
+		parent = *p;
+		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (vol_id > av->vol_id)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+	if (!av)
+		goto out;
+
+	av->highest_lnum = av->leb_count = 0;
+	av->vol_id = vol_id;
+	av->used_ebs = used_ebs;
+	av->data_pad = data_pad;
+	av->last_data_size = last_eb_bytes;
+	av->compat = 0;
+	av->vol_type = vol_type;
+	av->root = RB_ROOT;
+
+	dbg_bld("found volume (ID %i)", vol_id);
+
+	rb_link_node(&av->rb, parent, p);
+	rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+	return av;
+}
+
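+/*
+ * Note: the volume tree keeps larger vol_ids to the left; the lookup in
+ * process_pool_aeb() below walks it with the same orientation.
+ */
+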
+/**
+ * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the AEB to be assigned
+ * @av: target attach volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+			     struct ubi_ainf_peb *aeb,
+			     struct ubi_ainf_volume *av)
+{
+	struct ubi_ainf_peb *tmp_aeb;
+	struct rb_node **p = &av->root.rb_node, *parent = NULL;
+
+	while (*p) {
+		parent = *p;
+
+		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+		if (aeb->lnum != tmp_aeb->lnum) {
+			if (aeb->lnum < tmp_aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		} else
+			break;
+	}
+
+	list_del(&aeb->u.list);
+	av->leb_count++;
+
+	rb_link_node(&aeb->u.rb, parent, p);
+	rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+		      struct ubi_ainf_peb *new_aeb)
+{
+	struct rb_node **p = &av->root.rb_node, *parent = NULL;
+	struct ubi_ainf_peb *aeb, *victim;
+	int cmp_res;
+
+	while (*p) {
+		parent = *p;
+		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		}
+
+		/* This case can happen if the fastmap gets written
+		 * because of a volume change (creation, deletion, ..).
+		 * Then a PEB can be within the persistent EBA and the pool.
+		 */
+		if (aeb->pnum == new_aeb->pnum) {
+			ubi_assert(aeb->lnum == new_aeb->lnum);
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+			return 0;
+		}
+
+		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+		if (cmp_res < 0)
+			return cmp_res;
+
+		/* new_aeb is newer */
+		if (cmp_res & 1) {
+			victim = kmem_cache_alloc(ai->aeb_slab_cache,
+				GFP_KERNEL);
+			if (!victim)
+				return -ENOMEM;
+
+			victim->ec = aeb->ec;
+			victim->pnum = aeb->pnum;
+			list_add_tail(&victim->u.list, &ai->erase);
+
+			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+				av->last_data_size = \
+					be32_to_cpu(new_vh->data_size);
+
+			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+				av->vol_id, aeb->lnum, new_aeb->pnum);
+
+			aeb->ec = new_aeb->ec;
+			aeb->pnum = new_aeb->pnum;
+			aeb->copy_flag = new_vh->copy_flag;
+			aeb->scrub = new_aeb->scrub;
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		/* new_aeb is older */
+		} else {
+			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+				av->vol_id, aeb->lnum, new_aeb->pnum);
+			list_add_tail(&new_aeb->u.list, &ai->erase);
+		}
+
+		return 0;
+	}
+	/* This LEB is new, let's add it to the volume */
+
+	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+		av->highest_lnum = be32_to_cpu(new_vh->lnum);
+		av->last_data_size = be32_to_cpu(new_vh->data_size);
+	}
+
+	if (av->vol_type == UBI_STATIC_VOLUME)
+		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+	av->leb_count++;
+
+	rb_link_node(&new_aeb->u.rb, parent, p);
+	rb_insert_color(&new_aeb->u.rb, &av->root);
+
+	return 0;
+}
+
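+/*
+ * Illustrative conflict case: if the volume tree already maps LEB 5 to
+ * PEB 100 and the pool delivers another copy of LEB 5 on PEB 200,
+ * ubi_compare_lebs() decides via sqnum (and, for copies, the data CRC)
+ * which version is current; the losing PEB is queued on ai->erase above.
+ */
+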
+/**
+ * process_pool_aeb - process a non-empty PEB found in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+			    struct ubi_vid_hdr *new_vh,
+			    struct ubi_ainf_peb *new_aeb)
+{
+	struct ubi_ainf_volume *av, *tmp_av = NULL;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+	int found = 0;
+
+	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+		be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		return 0;
+	}
+
+	/* Find the volume this AEB belongs to */
+	while (*p) {
+		parent = *p;
+		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+			p = &(*p)->rb_left;
+		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+			p = &(*p)->rb_right;
+		else {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found)
+		av = tmp_av;
+	else {
+		ubi_err("orphaned volume in fastmap pool!");
+		return UBI_BAD_FASTMAP;
+	}
+
+	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+	return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * @ai: UBI attach info object
+ * @pnum: the PEB to be unmapped
+ *
+ * If fastmap detects a free PEB in the pool, it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node *node, *node2;
+	struct ubi_ainf_peb *aeb;
+
+	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+		av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+		for (node2 = rb_first(&av->root); node2;
+		     node2 = rb_next(node2)) {
+			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+			if (aeb->pnum == pnum) {
+				rb_erase(&aeb->u.rb, &av->root);
+				kmem_cache_free(ai->aeb_slab_cache, aeb);
+				return;
+			}
+		}
+	}
+}
+
+/**
+ * scan_pool - scans a pool for changed (i.e. no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the pool to be scanned
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @eba_orphans: list of PEBs which need to be scanned
+ * @freef: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
+ * < 0 indicates an internal error.
+ */
+#ifndef __UBOOT__
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		     int *pebs, int pool_size, unsigned long long *max_sqnum,
+		     struct list_head *eba_orphans, struct list_head *freef)
+#else
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
+		     struct list_head *eba_orphans, struct list_head *freef)
+#endif
+{
+	struct ubi_vid_hdr *vh;
+	struct ubi_ec_hdr *ech;
+	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
+	int i, pnum, err, found_orphan, ret = 0;
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		return -ENOMEM;
+
+	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vh) {
+		kfree(ech);
+		return -ENOMEM;
+	}
+
+	dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+	/*
+	 * Now scan all PEBs in the pool to find changes which have been made
+	 * after the creation of the fastmap
+	 */
+	for (i = 0; i < pool_size; i++) {
+		int scrub = 0;
+		int image_seq;
+
+		pnum = be32_to_cpu(pebs[i]);
+
+		if (ubi_io_is_bad(ubi, pnum)) {
+			ubi_err("bad PEB in fastmap pool!");
+			ret = UBI_BAD_FASTMAP;
+			goto out;
+		}
+
+		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+		if (err && err != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read EC header! PEB:%i err:%i",
+				pnum, err);
+			ret = err > 0 ? UBI_BAD_FASTMAP : err;
+			goto out;
+		} else if (err == UBI_IO_BITFLIPS)
+			scrub = 1;
+
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		image_seq = be32_to_cpu(ech->image_seq);
+
+		if (image_seq && (image_seq != ubi->image_seq)) {
+			ubi_err("bad image seq: 0x%x, expected: 0x%x",
+				be32_to_cpu(ech->image_seq), ubi->image_seq);
+			ret = UBI_BAD_FASTMAP;
+			goto out;
+		}
+
+		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+			unsigned long long ec = be64_to_cpu(ech->ec);
+			unmap_peb(ai, pnum);
+			dbg_bld("Adding PEB to free: %i", pnum);
+			if (err == UBI_IO_FF_BITFLIPS)
+				add_aeb(ai, freef, pnum, ec, 1);
+			else
+				add_aeb(ai, freef, pnum, ec, 0);
+			continue;
+		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
+			dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+			if (err == UBI_IO_BITFLIPS)
+				scrub = 1;
+
+			found_orphan = 0;
+			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
+				if (tmp_aeb->pnum == pnum) {
+					found_orphan = 1;
+					break;
+				}
+			}
+			if (found_orphan) {
+				list_del(&tmp_aeb->u.list);
+				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+			}
+
+			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+						   GFP_KERNEL);
+			if (!new_aeb) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			new_aeb->ec = be64_to_cpu(ech->ec);
+			new_aeb->pnum = pnum;
+			new_aeb->lnum = be32_to_cpu(vh->lnum);
+			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+			new_aeb->copy_flag = vh->copy_flag;
+			new_aeb->scrub = scrub;
+
+			if (*max_sqnum < new_aeb->sqnum)
+				*max_sqnum = new_aeb->sqnum;
+
+			err = process_pool_aeb(ubi, ai, vh, new_aeb);
+			if (err) {
+				ret = err > 0 ? UBI_BAD_FASTMAP : err;
+				goto out;
+			}
+		} else {
+			/* We are paranoid and fall back to scanning mode */
+			ubi_err("fastmap pool PEBs contains damaged PEBs!");
+			ret = err > 0 ? UBI_BAD_FASTMAP : err;
+			goto out;
+		}
+
+	}
+
+out:
+	ubi_free_vid_hdr(ubi, vh);
+	kfree(ech);
+	return ret;
+}
+
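+/*
+ * Background: the two pools are the fastmap's write-ahead area. Any PEB
+ * handed out after the fastmap was written lives in one of them, so only
+ * these few PEBs have to be scanned here to bring the attach info up to
+ * date - this is what keeps a fastmap attach at pool-size cost instead of
+ * a full-media scan.
+ */
+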
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+	struct ubi_ainf_peb *aeb;
+	struct ubi_ainf_volume *av;
+	struct rb_node *rb1, *rb2;
+	int n = 0;
+
+	list_for_each_entry(aeb, &ai->erase, u.list)
+		n++;
+
+	list_for_each_entry(aeb, &ai->free, u.list)
+		n++;
+
+	 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+			n++;
+
+	return n;
+}
+
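+/*
+ * On-flash fastmap layout, as parsed below (and written in the same order
+ * by ubi_write_fastmap()): ubi_fm_sb, ubi_fm_hdr, the regular pool, the WL
+ * pool, ubi_fm_ec records for the free, used, scrub and erase lists, and
+ * finally one ubi_fm_volhdr plus its ubi_fm_eba table per volume.
+ */
+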
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+			      struct ubi_attach_info *ai,
+			      struct ubi_fastmap_layout *fm)
+{
+	struct list_head used, eba_orphans, freef;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+	struct ubi_ec_hdr *ech;
+	struct ubi_fm_sb *fmsb;
+	struct ubi_fm_hdr *fmhdr;
+	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+	struct ubi_fm_ec *fmec;
+	struct ubi_fm_volhdr *fmvhdr;
+	struct ubi_fm_eba *fm_eba;
+	int ret, i, j, pool_size, wl_pool_size;
+	size_t fm_pos = 0, fm_size = ubi->fm_size;
+	unsigned long long max_sqnum = 0;
+	void *fm_raw = ubi->fm_buf;
+
+	INIT_LIST_HEAD(&used);
+	INIT_LIST_HEAD(&freef);
+	INIT_LIST_HEAD(&eba_orphans);
+	INIT_LIST_HEAD(&ai->corr);
+	INIT_LIST_HEAD(&ai->free);
+	INIT_LIST_HEAD(&ai->erase);
+	INIT_LIST_HEAD(&ai->alien);
+	ai->volumes = RB_ROOT;
+	ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
+					       sizeof(struct ubi_ainf_peb),
+					       0, 0, NULL);
+	if (!ai->aeb_slab_cache) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	fmsb = (struct ubi_fm_sb *)(fm_raw);
+	ai->max_sqnum = fmsb->sqnum;
+	fm_pos += sizeof(struct ubi_fm_sb);
+	if (fm_pos >= fm_size)
+		goto fail_bad;
+
+	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmhdr);
+	if (fm_pos >= fm_size)
+		goto fail_bad;
+
+	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+		goto fail_bad;
+	}
+
+	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl1);
+	if (fm_pos >= fm_size)
+		goto fail_bad;
+	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+		goto fail_bad;
+	}
+
+	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl2);
+	if (fm_pos >= fm_size)
+		goto fail_bad;
+	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+		goto fail_bad;
+	}
+
+	pool_size = be16_to_cpu(fmpl1->size);
+	wl_pool_size = be16_to_cpu(fmpl2->size);
+	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+		ubi_err("bad pool size: %i", pool_size);
+		goto fail_bad;
+	}
+
+	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+		ubi_err("bad WL pool size: %i", wl_pool_size);
+		goto fail_bad;
+	}
+
+	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+	    fm->max_pool_size < 0) {
+		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+		goto fail_bad;
+	}
+
+	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+	    fm->max_wl_pool_size < 0) {
+		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+		goto fail_bad;
+	}
+
+	/* read EC values from free list */
+	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmec);
+		if (fm_pos >= fm_size)
+			goto fail_bad;
+
+		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+			be32_to_cpu(fmec->ec), 0);
+	}
+
+	/* read EC values from used list */
+	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmec);
+		if (fm_pos >= fm_size)
+			goto fail_bad;
+
+		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+			be32_to_cpu(fmec->ec), 0);
+	}
+
+	/* read EC values from scrub list */
+	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmec);
+		if (fm_pos >= fm_size)
+			goto fail_bad;
+
+		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+			be32_to_cpu(fmec->ec), 1);
+	}
+
+	/* read EC values from erase list */
+	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmec);
+		if (fm_pos >= fm_size)
+			goto fail_bad;
+
+		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+			be32_to_cpu(fmec->ec), 1);
+	}
+
+	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+	/* Iterate over all volumes and read their EBA table */
+	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmvhdr);
+		if (fm_pos >= fm_size)
+			goto fail_bad;
+
+		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+			ubi_err("bad fastmap vol header magic: 0x%x, " \
+				"expected: 0x%x",
+				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+			goto fail_bad;
+		}
+
+		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+			     be32_to_cpu(fmvhdr->used_ebs),
+			     be32_to_cpu(fmvhdr->data_pad),
+			     fmvhdr->vol_type,
+			     be32_to_cpu(fmvhdr->last_eb_bytes));
+
+		if (!av)
+			goto fail_bad;
+
+		ai->vols_found++;
+		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fm_eba);
+		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+		if (fm_pos >= fm_size)
+			goto fail_bad;
+
+		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+			ubi_err("bad fastmap EBA header magic: 0x%x, " \
+				"expected: 0x%x",
+				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+			goto fail_bad;
+		}
+
+		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+			int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+			if (pnum < 0)
+				continue;
+
+			aeb = NULL;
+			list_for_each_entry(tmp_aeb, &used, u.list) {
+				if (tmp_aeb->pnum == pnum) {
+					aeb = tmp_aeb;
+					break;
+				}
+			}
+
+			/* This can happen if a PEB is already in an EBA known
+			 * by this fastmap but the PEB itself is not in the used
+			 * list.
+			 * In this case the PEB can be within the fastmap pool
+			 * or while writing the fastmap it was in the protection
+			 * queue.
+			 */
+			if (!aeb) {
+				aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+						       GFP_KERNEL);
+				if (!aeb) {
+					ret = -ENOMEM;
+
+					goto fail;
+				}
+
+				aeb->lnum = j;
+				aeb->pnum = pnum;
+				aeb->ec = -1;
+				aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
+				list_add_tail(&aeb->u.list, &eba_orphans);
+				continue;
+			}
+
+			aeb->lnum = j;
+
+			if (av->highest_lnum <= aeb->lnum)
+				av->highest_lnum = aeb->lnum;
+
+			assign_aeb_to_av(ai, aeb, av);
+
+			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+				aeb->pnum, aeb->lnum, av->vol_id);
+		}
+
+		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+		if (!ech) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
+					 u.list) {
+			int err;
+
+			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
+				ubi_err("bad PEB in fastmap EBA orphan list");
+				ret = UBI_BAD_FASTMAP;
+				kfree(ech);
+				goto fail;
+			}
+
+			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
+			if (err && err != UBI_IO_BITFLIPS) {
+				ubi_err("unable to read EC header! PEB:%i " \
+					"err:%i", tmp_aeb->pnum, err);
+				ret = err > 0 ? UBI_BAD_FASTMAP : err;
+				kfree(ech);
+
+				goto fail;
+			} else if (err == UBI_IO_BITFLIPS)
+				tmp_aeb->scrub = 1;
+
+			tmp_aeb->ec = be64_to_cpu(ech->ec);
+			assign_aeb_to_av(ai, tmp_aeb, av);
+		}
+
+		kfree(ech);
+	}
+
+	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
+			&eba_orphans, &freef);
+	if (ret)
+		goto fail;
+
+	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
+			&eba_orphans, &freef);
+	if (ret)
+		goto fail;
+
+	if (max_sqnum > ai->max_sqnum)
+		ai->max_sqnum = max_sqnum;
+
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list)
+		list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+	ubi_assert(list_empty(&used));
+	ubi_assert(list_empty(&eba_orphans));
+	ubi_assert(list_empty(&freef));
+
+	/*
+	 * If fastmap is leaking PEBs (must not happen), raise a
+	 * fat warning and fall back to scanning mode.
+	 * We do this here because in ubi_wl_init() it's too late
+	 * and we cannot fall back to scanning.
+	 */
+#ifndef __UBOOT__
+	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+		    ai->bad_peb_count - fm->used_blocks))
+		goto fail_bad;
+#else
+	if (count_fastmap_pebs(ai) != ubi->peb_count -
+		    ai->bad_peb_count - fm->used_blocks) {
+		WARN_ON(1);
+		goto fail_bad;
+	}
+#endif
+
+	return 0;
+
+fail_bad:
+	ret = UBI_BAD_FASTMAP;
+fail:
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+		list_del(&tmp_aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+		list_del(&tmp_aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list) {
+		list_del(&tmp_aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+	}
+
+	return ret;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		     int fm_anchor)
+{
+	struct ubi_fm_sb *fmsb, *fmsb2;
+	struct ubi_vid_hdr *vh;
+	struct ubi_ec_hdr *ech;
+	struct ubi_fastmap_layout *fm;
+	int i, used_blocks, pnum, ret = 0;
+	size_t fm_size;
+	__be32 crc, tmp_crc;
+	unsigned long long sqnum = 0;
+
+	mutex_lock(&ubi->fm_mutex);
+	memset(ubi->fm_buf, 0, ubi->fm_size);
+
+	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+	if (!fmsb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+	if (!fm) {
+		ret = -ENOMEM;
+		kfree(fmsb);
+		goto out;
+	}
+
+	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+	if (ret && ret != UBI_IO_BITFLIPS)
+		goto free_fm_sb;
+	else if (ret == UBI_IO_BITFLIPS)
+		fm->to_be_tortured[0] = 1;
+
+	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb;
+	}
+
+	if (fmsb->version != UBI_FM_FMT_VERSION) {
+		ubi_err("bad fastmap version: %i, expected: %i",
+			fmsb->version, UBI_FM_FMT_VERSION);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb;
+	}
+
+	used_blocks = be32_to_cpu(fmsb->used_blocks);
+	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb;
+	}
+
+	fm_size = ubi->leb_size * used_blocks;
+	if (fm_size != ubi->fm_size) {
+		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
+			ubi->fm_size);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb;
+	}
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech) {
+		ret = -ENOMEM;
+		goto free_fm_sb;
+	}
+
+	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vh) {
+		ret = -ENOMEM;
+		goto free_hdr;
+	}
+
+	for (i = 0; i < used_blocks; i++) {
+		int image_seq;
+
+		pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+		if (ubi_io_is_bad(ubi, pnum)) {
+			ret = UBI_BAD_FASTMAP;
+			goto free_hdr;
+		}
+
+		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+		if (ret && ret != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+				i, pnum);
+			if (ret > 0)
+				ret = UBI_BAD_FASTMAP;
+			goto free_hdr;
+		} else if (ret == UBI_IO_BITFLIPS)
+			fm->to_be_tortured[i] = 1;
+
+		image_seq = be32_to_cpu(ech->image_seq);
+		if (!ubi->image_seq)
+			ubi->image_seq = image_seq;
+
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		if (image_seq && (image_seq != ubi->image_seq)) {
+			ubi_err("wrong image seq:%d instead of %d",
+				be32_to_cpu(ech->image_seq), ubi->image_seq);
+			ret = UBI_BAD_FASTMAP;
+			goto free_hdr;
+		}
+
+		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		if (ret && ret != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read fastmap block# %i (PEB: %i)",
+				i, pnum);
+			goto free_hdr;
+		}
+
+		if (i == 0) {
+			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+				ubi_err("bad fastmap anchor vol_id: 0x%x," \
+					" expected: 0x%x",
+					be32_to_cpu(vh->vol_id),
+					UBI_FM_SB_VOLUME_ID);
+				ret = UBI_BAD_FASTMAP;
+				goto free_hdr;
+			}
+		} else {
+			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+				ubi_err("bad fastmap data vol_id: 0x%x," \
+					" expected: 0x%x",
+					be32_to_cpu(vh->vol_id),
+					UBI_FM_DATA_VOLUME_ID);
+				ret = UBI_BAD_FASTMAP;
+				goto free_hdr;
+			}
+		}
+
+		if (sqnum < be64_to_cpu(vh->sqnum))
+			sqnum = be64_to_cpu(vh->sqnum);
+
+		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+				  ubi->leb_start, ubi->leb_size);
+		if (ret && ret != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+				"err: %i)", i, pnum, ret);
+			goto free_hdr;
+		}
+	}
+
+	kfree(fmsb);
+	fmsb = NULL;
+
+	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+	tmp_crc = be32_to_cpu(fmsb2->data_crc);
+	fmsb2->data_crc = 0;
+	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+	if (crc != tmp_crc) {
+		ubi_err("fastmap data CRC is invalid");
+		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+		ret = UBI_BAD_FASTMAP;
+		goto free_hdr;
+	}
+
+	fmsb2->sqnum = sqnum;
+
+	fm->used_blocks = used_blocks;
+
+	ret = ubi_attach_fastmap(ubi, ai, fm);
+	if (ret) {
+		if (ret > 0)
+			ret = UBI_BAD_FASTMAP;
+		goto free_hdr;
+	}
+
+	for (i = 0; i < used_blocks; i++) {
+		struct ubi_wl_entry *e;
+
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+		if (!e) {
+			while (i--)
+				kfree(fm->e[i]);
+
+			ret = -ENOMEM;
+			goto free_hdr;
+		}
+
+		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+		fm->e[i] = e;
+	}
+
+	ubi->fm = fm;
+	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+	ubi_msg("attached by fastmap");
+	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
+	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+	ubi->fm_disabled = 0;
+
+	ubi_free_vid_hdr(ubi, vh);
+	kfree(ech);
+out:
+	mutex_unlock(&ubi->fm_mutex);
+	if (ret == UBI_BAD_FASTMAP)
+		ubi_err("Attach by fastmap failed, doing a full scan!");
+	return ret;
+
+free_hdr:
+	ubi_free_vid_hdr(ubi, vh);
+	kfree(ech);
+free_fm_sb:
+	kfree(fmsb);
+	kfree(fm);
+	goto out;
+}
+
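+/*
+ * Note on @fm_anchor: the fastmap superblock must sit in one of the first
+ * UBI_FM_MAX_START (64) PEBs, so the attach code only has to scan that
+ * small window to find it; block_loc[] in the superblock then points to
+ * the remaining fastmap PEBs.
+ */
+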
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+			     struct ubi_fastmap_layout *new_fm)
+{
+	size_t fm_pos = 0;
+	void *fm_raw;
+	struct ubi_fm_sb *fmsb;
+	struct ubi_fm_hdr *fmh;
+	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+	struct ubi_fm_ec *fec;
+	struct ubi_fm_volhdr *fvh;
+	struct ubi_fm_eba *feba;
+	struct rb_node *node;
+	struct ubi_wl_entry *wl_e;
+	struct ubi_volume *vol;
+	struct ubi_vid_hdr *avhdr, *dvhdr;
+	struct ubi_work *ubi_wrk;
+	int ret, i, j, free_peb_count, used_peb_count, vol_count;
+	int scrub_peb_count, erase_peb_count;
+
+	fm_raw = ubi->fm_buf;
+	memset(ubi->fm_buf, 0, ubi->fm_size);
+
+	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+	if (!avhdr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+	if (!dvhdr) {
+		ret = -ENOMEM;
+		goto out_kfree;
+	}
+
+	spin_lock(&ubi->volumes_lock);
+	spin_lock(&ubi->wl_lock);
+
+	fmsb = (struct ubi_fm_sb *)fm_raw;
+	fm_pos += sizeof(*fmsb);
+	ubi_assert(fm_pos <= ubi->fm_size);
+
+	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmh);
+	ubi_assert(fm_pos <= ubi->fm_size);
+
+	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+	fmsb->version = UBI_FM_FMT_VERSION;
+	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+	/* the max sqnum will be filled in while *reading* the fastmap */
+	fmsb->sqnum = 0;
+
+	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+	free_peb_count = 0;
+	used_peb_count = 0;
+	scrub_peb_count = 0;
+	erase_peb_count = 0;
+	vol_count = 0;
+
+	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl1);
+	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
+	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+	for (i = 0; i < ubi->fm_pool.size; i++)
+		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+
+	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl2);
+	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
+	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+	for (i = 0; i < ubi->fm_wl_pool.size; i++)
+		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+
+	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		free_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
+	fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		used_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
+	fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		scrub_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
+	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+	list_for_each_entry(ubi_wrk, &ubi->works, list) {
+		if (ubi_is_erase_work(ubi_wrk)) {
+			wl_e = ubi_wrk->e;
+			ubi_assert(wl_e);
+
+			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+			fec->pnum = cpu_to_be32(wl_e->pnum);
+			fec->ec = cpu_to_be32(wl_e->ec);
+
+			erase_peb_count++;
+			fm_pos += sizeof(*fec);
+			ubi_assert(fm_pos <= ubi->fm_size);
+		}
+	}
+	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+		vol = ubi->volumes[i];
+
+		if (!vol)
+			continue;
+
+		vol_count++;
+
+		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fvh);
+		ubi_assert(fm_pos <= ubi->fm_size);
+
+		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+		fvh->vol_id = cpu_to_be32(vol->vol_id);
+		fvh->vol_type = vol->vol_type;
+		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+		fvh->data_pad = cpu_to_be32(vol->data_pad);
+		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+			vol->vol_type == UBI_STATIC_VOLUME);
+
+		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+		ubi_assert(fm_pos <= ubi->fm_size);
+
+		for (j = 0; j < vol->reserved_pebs; j++)
+			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+		feba->reserved_pebs = cpu_to_be32(j);
+		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+	}
+	fmh->vol_count = cpu_to_be32(vol_count);
+	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+	avhdr->lnum = 0;
+
+	spin_unlock(&ubi->wl_lock);
+	spin_unlock(&ubi->volumes_lock);
+
+	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+	if (ret) {
+		ubi_err("unable to write vid_hdr to fastmap SB!");
+		goto out_kfree;
+	}
+
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+	}
+
+	fmsb->data_crc = 0;
+	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+					   ubi->fm_size));
+
+	for (i = 1; i < new_fm->used_blocks; i++) {
+		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+		dvhdr->lnum = cpu_to_be32(i);
+		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+		if (ret) {
+			ubi_err("unable to write vid_hdr to PEB %i!",
+				new_fm->e[i]->pnum);
+			goto out_kfree;
+		}
+	}
+
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+		if (ret) {
+			ubi_err("unable to write fastmap to PEB %i!",
+				new_fm->e[i]->pnum);
+			goto out_kfree;
+		}
+	}
+
+	ubi_assert(new_fm);
+	ubi->fm = new_fm;
+
+	dbg_bld("fastmap written!");
+
+out_kfree:
+	ubi_free_vid_hdr(ubi, avhdr);
+	ubi_free_vid_hdr(ubi, dvhdr);
+out:
+	return ret;
+}
+
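+/*
+ * Note on ordering above: the VID headers are written before the payload,
+ * and data_crc is computed over the complete in-memory image with the CRC
+ * field zeroed - mirroring the check in ubi_scan_fastmap(). The maximal
+ * sqnum is deliberately left zero here and reconstructed at attach time.
+ */
+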
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+	int ret;
+	struct ubi_ec_hdr *ec_hdr;
+	long long ec;
+
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ec_hdr)
+		return -ENOMEM;
+
+	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+	if (ret < 0)
+		goto out;
+	else if (ret && ret != UBI_IO_BITFLIPS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = ubi_io_sync_erase(ubi, pnum, 0);
+	if (ret < 0)
+		goto out;
+
+	ec = be64_to_cpu(ec_hdr->ec);
+	ec += ret;
+	if (ec > UBI_MAX_ERASECOUNTER) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ec_hdr->ec = cpu_to_be64(ec);
+	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+	if (ret < 0)
+		goto out;
+
+	ret = ec;
+out:
+	kfree(ec_hdr);
+	return ret;
+}
+
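+/*
+ * Note: ubi_io_sync_erase() returns the number of erasures performed (more
+ * than one when torturing), which is why its return value is added to the
+ * old EC before the header is written back.
+ */
+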
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ * @fm: the fastmap to be destroyed
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi,
+			      struct ubi_fastmap_layout *fm)
+{
+	int ret;
+	struct ubi_vid_hdr *vh;
+
+	ret = erase_block(ubi, fm->e[0]->pnum);
+	if (ret < 0)
+		return ret;
+
+	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+	if (!vh)
+		return -ENOMEM;
+
+	/*
+	 * Deleting the current fastmap SB is not enough, an old SB may exist,
+	 * so create a (corrupted) SB such that fastmap will find it and fall
+	 * back to scanning mode in any case.
+	 */
+	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+	ubi_free_vid_hdr(ubi, vh);
+
+	return ret;
+}
+
+/**
+ * ubi_update_fastmap - update the fastmap; called by UBI if a volume changes
+ * or a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+	int ret, i;
+	struct ubi_fastmap_layout *new_fm, *old_fm;
+	struct ubi_wl_entry *tmp_e;
+
+	mutex_lock(&ubi->fm_mutex);
+
+	ubi_refill_pools(ubi);
+
+	if (ubi->ro_mode || ubi->fm_disabled) {
+		mutex_unlock(&ubi->fm_mutex);
+		return 0;
+	}
+
+	ret = ubi_ensure_anchor_pebs(ubi);
+	if (ret) {
+		mutex_unlock(&ubi->fm_mutex);
+		return ret;
+	}
+
+	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+	if (!new_fm) {
+		mutex_unlock(&ubi->fm_mutex);
+		return -ENOMEM;
+	}
+
+	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+		if (!new_fm->e[i]) {
+			while (i--)
+				kfree(new_fm->e[i]);
+
+			kfree(new_fm);
+			mutex_unlock(&ubi->fm_mutex);
+			return -ENOMEM;
+		}
+	}
+
+	old_fm = ubi->fm;
+	ubi->fm = NULL;
+
+	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+		ubi_err("fastmap too large");
+		ret = -ENOSPC;
+		goto err;
+	}
+
+	for (i = 1; i < new_fm->used_blocks; i++) {
+		spin_lock(&ubi->wl_lock);
+		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+		spin_unlock(&ubi->wl_lock);
+
+		if (!tmp_e && !old_fm) {
+			int j;
+			ubi_err("could not get any free erase block");
+
+			for (j = 1; j < i; j++)
+				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+
+			ret = -ENOSPC;
+			goto err;
+		} else if (!tmp_e && old_fm) {
+			ret = erase_block(ubi, old_fm->e[i]->pnum);
+			if (ret < 0) {
+				int j;
+
+				for (j = 1; j < i; j++)
+					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+							  j, 0);
+
+				ubi_err("could not erase old fastmap PEB");
+				goto err;
+			}
+
+			new_fm->e[i]->pnum = old_fm->e[i]->pnum;
+			new_fm->e[i]->ec = old_fm->e[i]->ec;
+		} else {
+			new_fm->e[i]->pnum = tmp_e->pnum;
+			new_fm->e[i]->ec = tmp_e->ec;
+
+			if (old_fm)
+				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+						  old_fm->to_be_tortured[i]);
+		}
+	}
+
+	spin_lock(&ubi->wl_lock);
+	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+	spin_unlock(&ubi->wl_lock);
+
+	if (old_fm) {
+		/* no fresh anchor PEB was found, reuse the old one */
+		if (!tmp_e) {
+			ret = erase_block(ubi, old_fm->e[0]->pnum);
+			if (ret < 0) {
+				int i;
+				ubi_err("could not erase old anchor PEB");
+
+				for (i = 1; i < new_fm->used_blocks; i++)
+					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+							  i, 0);
+				goto err;
+			}
+
+			new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+			new_fm->e[0]->ec = ret;
+		} else {
+			/* we've got a new anchor PEB, return the old one */
+			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+					  old_fm->to_be_tortured[0]);
+
+			new_fm->e[0]->pnum = tmp_e->pnum;
+			new_fm->e[0]->ec = tmp_e->ec;
+		}
+	} else {
+		if (!tmp_e) {
+			int i;
+			ubi_err("could not find any anchor PEB");
+
+			for (i = 1; i < new_fm->used_blocks; i++)
+				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+
+			ret = -ENOSPC;
+			goto err;
+		}
+
+		new_fm->e[0]->pnum = tmp_e->pnum;
+		new_fm->e[0]->ec = tmp_e->ec;
+	}
+
+	down_write(&ubi->work_sem);
+	down_write(&ubi->fm_sem);
+	ret = ubi_write_fastmap(ubi, new_fm);
+	up_write(&ubi->fm_sem);
+	up_write(&ubi->work_sem);
+
+	if (ret)
+		goto err;
+
+out_unlock:
+	mutex_unlock(&ubi->fm_mutex);
+	kfree(old_fm);
+	return ret;
+
+err:
+	kfree(new_fm);
+
+	ubi_warn("Unable to write new fastmap, err=%i", ret);
+
+	ret = 0;
+	if (old_fm) {
+		ret = invalidate_fastmap(ubi, old_fm);
+		if (ret < 0)
+			ubi_err("Unable to invalidiate current fastmap!");
+		else if (ret)
+			ret = 0;
+	}
+	goto out_unlock;
+}
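+
+/*
+ * Failure handling note: if the new fastmap cannot be written, the old one
+ * is invalidated so that the next attach falls back to a full scan instead
+ * of trusting stale data; ubi_update_fastmap() then still returns 0, since
+ * running without a fastmap is not fatal - only a failed invalidation is.
+ */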
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 960befc..41d7eb7 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1,22 +1,21 @@
 /*
  * Copyright (c) International Business Machines Corp., 2006
  * Copyright (c) Nokia Corporation, 2006, 2007
- *
  * SPDX-License-Identifier:	GPL-2.0+
  *
  * Author: Artem Bityutskiy (Битюцкий Артём)
  */
 
 /*
- * UBI input/output unit.
+ * UBI input/output sub-system.
  *
- * This unit provides a uniform way to work with all kinds of the underlying
- * MTD devices. It also implements handy functions for reading and writing UBI
- * headers.
+ * This sub-system provides a uniform way to work with all kinds of the
+ * underlying MTD devices. It also implements handy functions for reading and
+ * writing UBI headers.
  *
  * We are trying to have a paranoid mindset and not to trust to what we read
- * from the flash media in order to be more secure and robust. So this unit
- * validates every single header it reads from the flash media.
+ * from the flash media in order to be more secure and robust. So this
+ * sub-system validates every single header it reads from the flash media.
  *
  * Some words about how the eraseblock headers are stored.
  *
@@ -52,9 +51,9 @@
  * device, e.g., make @ubi->min_io_size = 512 in the example above?
  *
  * A: because when writing a sub-page, MTD still writes a full 2K page but the
- * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing
- * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we
- * prefer to use sub-pages only for EV and VID headers.
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
  *
  * As it was noted above, the VID header may start at a non-aligned offset.
  * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
@@ -67,39 +66,33 @@
  * 512-byte chunks, we have to allocate one more buffer and copy our VID header
  * to offset 448 of this buffer.
  *
- * The I/O unit does the following trick in order to avoid this extra copy.
- * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header
- * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the
- * VID header is being written out, it shifts the VID header pointer back and
- * writes the whole sub-page.
+ * The I/O sub-system does the following trick in order to avoid this extra
+ * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
+ * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
+ * When the VID header is being written out, it shifts the VID header pointer
+ * back and writes the whole sub-page.
  */
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/crc32.h>
 #include <linux/err.h>
+#include <linux/slab.h>
+#else
+#include <ubi_uboot.h>
 #endif
 
-#include <ubi_uboot.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
-				 const struct ubi_ec_hdr *ec_hdr);
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
-				  const struct ubi_vid_hdr *vid_hdr);
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
-				 int len);
-#else
-#define paranoid_check_not_bad(ubi, pnum) 0
-#define paranoid_check_peb_ec_hdr(ubi, pnum)  0
-#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr)  0
-#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
-#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
-#endif
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+			     const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+			      const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+			    int offset, int len);
 
 /**
  * ubi_io_read - read data from a physical eraseblock.
@@ -136,51 +129,77 @@
 	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
 	ubi_assert(len > 0);
 
-	err = paranoid_check_not_bad(ubi, pnum);
+	err = self_check_not_bad(ubi, pnum);
 	if (err)
-		return err > 0 ? -EINVAL : err;
+		return err;
+
+	/*
+	 * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+	 * do not do this, the following may happen:
+	 * 1. The buffer contains data from a previous operation, e.g., read
+	 *    from another PEB previously. The data looks as expected, so if we
+	 *    just do not read anything and return - the caller would not
+	 *    notice this. E.g., if we are reading a VID header, the buffer may
+	 *    contain a valid VID header from another PEB.
+	 * 2. The driver is buggy and returns us success or -EBADMSG or
+	 *    -EUCLEAN, but it does not actually put any data to the buffer.
+	 *
+	 * This may confuse UBI or upper layers - they may think the buffer
+	 * contains valid data while in fact it is just old data. This is
+	 * especially possible because UBI (and UBIFS) relies on CRC, and
+	 * treats data as correct even in case of ECC errors if the CRC is
+	 * correct.
+	 *
+	 * Try to prevent this situation by changing the first byte of the
+	 * buffer.
+	 */
+	*((uint8_t *)buf) ^= 0xFF;
 
 	addr = (loff_t)pnum * ubi->peb_size + offset;
 retry:
 	err = mtd_read(ubi->mtd, addr, len, &read, buf);
 	if (err) {
-		if (err == -EUCLEAN) {
+		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
+
+		if (mtd_is_bitflip(err)) {
 			/*
 			 * -EUCLEAN is reported if there was a bit-flip which
 			 * was corrected, so this is harmless.
+			 *
+			 * We do not report it here unless debugging is
+			 * enabled. A corresponding message will be printed
+			 * later, when it has been scrubbed.
 			 */
 			ubi_msg("fixable bit-flip detected at PEB %d", pnum);
 			ubi_assert(len == read);
 			return UBI_IO_BITFLIPS;
 		}
 
-		if (read != len && retries++ < UBI_IO_RETRIES) {
-			dbg_io("error %d while reading %d bytes from PEB %d:%d, "
-			       "read only %zd bytes, retry",
-			       err, len, pnum, offset, read);
+		if (retries++ < UBI_IO_RETRIES) {
+			ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+				 err, errstr, len, pnum, offset, read);
 			yield();
 			goto retry;
 		}
 
-		ubi_err("error %d while reading %d bytes from PEB %d:%d, "
-			"read %zd bytes", err, len, pnum, offset, read);
-		ubi_dbg_dump_stack();
+		ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+			err, errstr, len, pnum, offset, read);
+		dump_stack();
 
 		/*
 		 * The driver should never return -EBADMSG if it failed to read
 		 * all the requested data. But some buggy drivers might do
 		 * this, so we change it to -EIO.
 		 */
-		if (read != len && err == -EBADMSG) {
+		if (read != len && mtd_is_eccerr(err)) {
 			ubi_assert(0);
-			printk("%s[%d] not here\n", __func__, __LINE__);
-/*			err = -EIO; */
+			err = -EIO;
 		}
 	} else {
 		ubi_assert(len == read);
 
-		if (ubi_dbg_is_bitflip()) {
-			dbg_msg("bit-flip (emulated)");
+		if (ubi_dbg_is_bitflip(ubi)) {
+			dbg_gen("bit-flip (emulated)");
 			err = UBI_IO_BITFLIPS;
 		}
 	}
@@ -224,46 +243,60 @@
 		return -EROFS;
 	}
 
-	/* The below has to be compiled out if paranoid checks are disabled */
-
-	err = paranoid_check_not_bad(ubi, pnum);
+	err = self_check_not_bad(ubi, pnum);
 	if (err)
-		return err > 0 ? -EINVAL : err;
+		return err;
 
 	/* The area we are writing to has to contain all 0xFF bytes */
-	err = paranoid_check_all_ff(ubi, pnum, offset, len);
+	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
 	if (err)
-		return err > 0 ? -EINVAL : err;
+		return err;
 
 	if (offset >= ubi->leb_start) {
 		/*
 		 * We write to the data area of the physical eraseblock. Make
 		 * sure it has valid EC and VID headers.
 		 */
-		err = paranoid_check_peb_ec_hdr(ubi, pnum);
+		err = self_check_peb_ec_hdr(ubi, pnum);
 		if (err)
-			return err > 0 ? -EINVAL : err;
-		err = paranoid_check_peb_vid_hdr(ubi, pnum);
+			return err;
+		err = self_check_peb_vid_hdr(ubi, pnum);
 		if (err)
-			return err > 0 ? -EINVAL : err;
+			return err;
 	}
 
-	if (ubi_dbg_is_write_failure()) {
-		dbg_err("cannot write %d bytes to PEB %d:%d "
-			"(emulated)", len, pnum, offset);
-		ubi_dbg_dump_stack();
+	if (ubi_dbg_is_write_failure(ubi)) {
+		ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+			len, pnum, offset);
+		dump_stack();
 		return -EIO;
 	}
 
 	addr = (loff_t)pnum * ubi->peb_size + offset;
 	err = mtd_write(ubi->mtd, addr, len, &written, buf);
 	if (err) {
-		ubi_err("error %d while writing %d bytes to PEB %d:%d, written"
-			" %zd bytes", err, len, pnum, offset, written);
-		ubi_dbg_dump_stack();
+		ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+			err, len, pnum, offset, written);
+		dump_stack();
+		ubi_dump_flash(ubi, pnum, offset, len);
 	} else
 		ubi_assert(written == len);
 
+	if (!err) {
+		err = self_check_write(ubi, buf, pnum, offset, len);
+		if (err)
+			return err;
+
+		/*
+		 * Since we always write sequentially, the rest of the PEB has
+		 * to contain only 0xFF bytes.
+		 */
+		offset += len;
+		len = ubi->peb_size - offset;
+		if (len)
+			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+	}
+
 	return err;
 }
 
@@ -295,6 +328,12 @@
 	wait_queue_head_t wq;
 
 	dbg_io("erase PEB %d", pnum);
+	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+	if (ubi->ro_mode) {
+		ubi_err("read-only mode");
+		return -EROFS;
+	}
 
 retry:
 	init_waitqueue_head(&wq);
@@ -309,13 +348,13 @@
 	err = mtd_erase(ubi->mtd, &ei);
 	if (err) {
 		if (retries++ < UBI_IO_RETRIES) {
-			dbg_io("error %d while erasing PEB %d, retry",
-			       err, pnum);
+			ubi_warn("error %d while erasing PEB %d, retry",
+				 err, pnum);
 			yield();
 			goto retry;
 		}
 		ubi_err("cannot erase PEB %d, error %d", pnum, err);
-		ubi_dbg_dump_stack();
+		dump_stack();
 		return err;
 	}
 
@@ -328,46 +367,27 @@
 
 	if (ei.state == MTD_ERASE_FAILED) {
 		if (retries++ < UBI_IO_RETRIES) {
-			dbg_io("error while erasing PEB %d, retry", pnum);
+			ubi_warn("error while erasing PEB %d, retry", pnum);
 			yield();
 			goto retry;
 		}
 		ubi_err("cannot erase PEB %d", pnum);
-		ubi_dbg_dump_stack();
+		dump_stack();
 		return -EIO;
 	}
 
-	err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
 	if (err)
-		return err > 0 ? -EINVAL : err;
+		return err;
 
-	if (ubi_dbg_is_erase_failure() && !err) {
-		dbg_err("cannot erase PEB %d (emulated)", pnum);
+	if (ubi_dbg_is_erase_failure(ubi)) {
+		ubi_err("cannot erase PEB %d (emulated)", pnum);
 		return -EIO;
 	}
 
 	return 0;
 }
 
-/**
- * check_pattern - check if buffer contains only a certain byte pattern.
- * @buf: buffer to check
- * @patt: the pattern to check
- * @size: buffer size in bytes
- *
- * This function returns %1 in there are only @patt bytes in @buf, and %0 if
- * something else was also found.
- */
-static int check_pattern(const void *buf, uint8_t patt, int size)
-{
-	int i;
-
-	for (i = 0; i < size; i++)
-		if (((const uint8_t *)buf)[i] != patt)
-			return 0;
-	return 1;
-}
-
 /* Patterns to write to a physical eraseblock when torturing it */
 static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
 
@@ -384,6 +404,7 @@
 {
 	int err, i, patt_count;
 
+	ubi_msg("run torture test for PEB %d", pnum);
 	patt_count = ARRAY_SIZE(patterns);
 	ubi_assert(patt_count > 0);
 
@@ -394,11 +415,11 @@
 			goto out;
 
 		/* Make sure the PEB contains only 0xFF bytes */
-		err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
 		if (err)
 			goto out;
 
-		err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
+		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
 		if (err == 0) {
 			ubi_err("erased PEB %d, but a non-0xFF byte found",
 				pnum);
@@ -407,17 +428,18 @@
 		}
 
 		/* Write a pattern and check it */
-		memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
-		err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
 		if (err)
 			goto out;
 
-		memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
-		err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
 		if (err)
 			goto out;
 
-		err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size);
+		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
+					ubi->peb_size);
 		if (err == 0) {
 			ubi_err("pattern %x checking failed for PEB %d",
 				patterns[i], pnum);
@@ -427,10 +449,11 @@
 	}
 
 	err = patt_count;
+	ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
 
 out:
 	mutex_unlock(&ubi->buf_mutex);
-	if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
 		/*
 		 * If a bit-flip or data integrity error was detected, the test
 		 * has not passed because it happened on a freshly erased
@@ -444,6 +467,80 @@
 }
 
 /**
+ * nor_erase_prepare - prepare a NOR flash PEB for erasure.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to prepare
+ *
+ * NOR flash, or at least some of them, have peculiar embedded PEB erasure
+ * algorithm: the PEB is first filled with zeroes, then it is erased. And
+ * filling with zeroes starts from the end of the PEB. This was observed with
+ * Spansion S29GL512N NOR flash.
+ *
+ * This means that in case of a power cut we may end up with intact data at the
+ * beginning of the PEB, and all zeroes at the end of PEB. In other words, the
+ * EC and VID headers are OK, but a large chunk of data at the end of PEB is
+ * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
+ * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
+ *
+ * This function is called before erasing NOR PEBs and it zeroes out EC and VID
+ * magic numbers in order to invalidate them and prevent the failures. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
+{
+	int err;
+	size_t written;
+	loff_t addr;
+	uint32_t data = 0;
+	struct ubi_ec_hdr ec_hdr;
+
+	/*
+	 * Note, we cannot generally define VID header buffers on stack,
+	 * because of the way we deal with these buffers (see the header
+	 * comment in this file). But we know this is a NOR-specific piece of
+	 * code, so we can do this. But yes, this is error-prone and we should
+	 * (pre-)allocate VID header buffer instead.
+	 */
+	struct ubi_vid_hdr vid_hdr;
+
+	/*
+	 * If VID or EC is valid, we have to corrupt them before erasing.
+	 * It is important to first invalidate the EC header, and then the VID
+	 * header. Otherwise a power cut may lead to valid EC header and
+	 * invalid VID header, in which case UBI will treat this PEB as
+	 * corrupted and will try to preserve it, and print scary warnings.
+	 */
+	addr = (loff_t)pnum * ubi->peb_size;
+	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+	    err != UBI_IO_FF) {
+		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+		if (err)
+			goto error;
+	}
+
+	err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+	    err != UBI_IO_FF) {
+		addr += ubi->vid_hdr_aloffset;
+		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+		if (err)
+			goto error;
+	}
+	return 0;
+
+error:
+	/*
+	 * The PEB contains a valid VID or EC header, but we cannot invalidate
+	 * it. Supposedly the flash media or the driver is screwed up, so
+	 * return an error.
+	 */
+	ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
+	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
+	return -EIO;
+}
+
+/**
  * ubi_io_sync_erase - synchronously erase a physical eraseblock.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number to erase
@@ -452,7 +549,7 @@
  * This function synchronously erases physical eraseblock @pnum. If @torture
  * flag is not zero, the physical eraseblock is checked by means of writing
  * different patterns to it and reading them back. If the torturing is enabled,
- * the physical eraseblock is erased more then once.
+ * the physical eraseblock is erased more than once.
  *
  * This function returns the number of erasures made in case of success, %-EIO
  * if the erasure failed or the torturing test failed, and other negative error
@@ -465,15 +562,21 @@
 
 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
 
-	err = paranoid_check_not_bad(ubi, pnum);
+	err = self_check_not_bad(ubi, pnum);
 	if (err != 0)
-		return err > 0 ? -EINVAL : err;
+		return err;
 
 	if (ubi->ro_mode) {
 		ubi_err("read-only mode");
 		return -EROFS;
 	}
 
+	if (ubi->nor_flash) {
+		err = nor_erase_prepare(ubi, pnum);
+		if (err)
+			return err;
+	}
+
 	if (torture) {
 		ret = torture_peb(ubi, pnum);
 		if (ret < 0)
@@ -564,8 +667,7 @@
 	leb_start = be32_to_cpu(ec_hdr->data_offset);
 
 	if (ec_hdr->version != UBI_VERSION) {
-		ubi_err("node with incompatible UBI version found: "
-			"this UBI version is %d, image version is %d",
+		ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
 			UBI_VERSION, (int)ec_hdr->version);
 		goto bad;
 	}
@@ -591,8 +693,8 @@
 
 bad:
 	ubi_err("bad EC header");
-	ubi_dbg_dump_ec_hdr(ec_hdr);
-	ubi_dbg_dump_stack();
+	ubi_dump_ec_hdr(ec_hdr);
+	dump_stack();
 	return 1;
 }
 
@@ -612,67 +714,58 @@
  * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
  *   and corrected by the flash driver; this is harmless but may indicate that
  *   this eraseblock may become bad soon (but may be not);
- * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error);
- * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty;
+ * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
+ * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
+ *   a data integrity error (uncorrectable ECC error in case of NAND);
+ * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty)
  * o a negative error code in case of failure.
  */
 int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 		       struct ubi_ec_hdr *ec_hdr, int verbose)
 {
-	int err, read_err = 0;
+	int err, read_err;
 	uint32_t crc, magic, hdr_crc;
 
 	dbg_io("read EC header from PEB %d", pnum);
 	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
-	if (UBI_IO_DEBUG)
-		verbose = 1;
 
-	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
-	if (err) {
-		if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
-			return err;
+	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+	if (read_err) {
+		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+			return read_err;
 
 		/*
 		 * We read all the data, but either a correctable bit-flip
-		 * occurred, or MTD reported about some data integrity error,
-		 * like an ECC error in case of NAND. The former is harmless,
-		 * the later may mean that the read data is corrupted. But we
-		 * have a CRC check-sum and we will detect this. If the EC
-		 * header is still OK, we just report this as there was a
-		 * bit-flip.
+		 * occurred, or MTD reported a data integrity error
+		 * (uncorrectable ECC error in case of NAND). The former is
+		 * harmless, the latter may mean that the read data is
+		 * corrupted. But we have a CRC check-sum and we will detect
+		 * this. If the EC header is still OK, we just report this as
+		 * there was a bit-flip, to force scrubbing.
 		 */
-		read_err = err;
 	}
 
 	magic = be32_to_cpu(ec_hdr->magic);
 	if (magic != UBI_EC_HDR_MAGIC) {
+		if (mtd_is_eccerr(read_err))
+			return UBI_IO_BAD_HDR_EBADMSG;
+
 		/*
 		 * The magic field is wrong. Let's check if we have read all
 		 * 0xFF. If yes, this physical eraseblock is assumed to be
 		 * empty.
-		 *
-		 * But if there was a read error, we do not test it for all
-		 * 0xFFs. Even if it does contain all 0xFFs, this error
-		 * indicates that something is still wrong with this physical
-		 * eraseblock and we anyway cannot treat it as empty.
 		 */
-		if (read_err != -EBADMSG &&
-		    check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
+		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
 			/* The physical eraseblock is supposedly empty */
-
-			/*
-			 * The below is just a paranoid check, it has to be
-			 * compiled out if paranoid checks are disabled.
-			 */
-			err = paranoid_check_all_ff(ubi, pnum, 0,
-						    ubi->peb_size);
-			if (err)
-				return err > 0 ? UBI_IO_BAD_EC_HDR : err;
-
 			if (verbose)
-				ubi_warn("no EC header found at PEB %d, "
-					 "only 0xFF bytes", pnum);
-			return UBI_IO_PEB_EMPTY;
+				ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+					 pnum);
+			dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+				pnum);
+			if (!read_err)
+				return UBI_IO_FF;
+			else
+				return UBI_IO_FF_BITFLIPS;
 		}
 
 		/*
@@ -680,11 +773,13 @@
 		 * 0xFF bytes. Report that the header is corrupted.
 		 */
 		if (verbose) {
-			ubi_warn("bad magic number at PEB %d: %08x instead of "
-				 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
-			ubi_dbg_dump_ec_hdr(ec_hdr);
+			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+				 pnum, magic, UBI_EC_HDR_MAGIC);
+			ubi_dump_ec_hdr(ec_hdr);
 		}
-		return UBI_IO_BAD_EC_HDR;
+		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+			pnum, magic, UBI_EC_HDR_MAGIC);
+		return UBI_IO_BAD_HDR;
 	}
 
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -692,11 +787,17 @@
 
 	if (hdr_crc != crc) {
 		if (verbose) {
-			ubi_warn("bad EC header CRC at PEB %d, calculated %#08x,"
-				 " read %#08x", pnum, crc, hdr_crc);
-			ubi_dbg_dump_ec_hdr(ec_hdr);
+			ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+				 pnum, crc, hdr_crc);
+			ubi_dump_ec_hdr(ec_hdr);
 		}
-		return UBI_IO_BAD_EC_HDR;
+		dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+			pnum, crc, hdr_crc);
+
+		if (!read_err)
+			return UBI_IO_BAD_HDR;
+		else
+			return UBI_IO_BAD_HDR_EBADMSG;
 	}
 
 	/* And of course validate what has just been read from the media */
@@ -706,6 +807,10 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * If there was %-EBADMSG, but the header CRC is still OK, report a
+	 * bit-flip to force scrubbing on this PEB.
+	 */
 	return read_err ? UBI_IO_BITFLIPS : 0;
 }
 
@@ -737,12 +842,13 @@
 	ec_hdr->version = UBI_VERSION;
 	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
 	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+	ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
 	ec_hdr->hdr_crc = cpu_to_be32(crc);
 
-	err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+	err = self_check_ec_hdr(ubi, pnum, ec_hdr);
 	if (err)
-		return -EINVAL;
+		return err;
 
 	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
 	return err;
@@ -771,40 +877,40 @@
 	int usable_leb_size = ubi->leb_size - data_pad;
 
 	if (copy_flag != 0 && copy_flag != 1) {
-		dbg_err("bad copy_flag");
+		ubi_err("bad copy_flag");
 		goto bad;
 	}
 
 	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
 	    data_pad < 0) {
-		dbg_err("negative values");
+		ubi_err("negative values");
 		goto bad;
 	}
 
 	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
-		dbg_err("bad vol_id");
+		ubi_err("bad vol_id");
 		goto bad;
 	}
 
 	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
-		dbg_err("bad compat");
+		ubi_err("bad compat");
 		goto bad;
 	}
 
 	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
 	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
 	    compat != UBI_COMPAT_REJECT) {
-		dbg_err("bad compat");
+		ubi_err("bad compat");
 		goto bad;
 	}
 
 	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
-		dbg_err("bad vol_type");
+		ubi_err("bad vol_type");
 		goto bad;
 	}
 
 	if (data_pad >= ubi->leb_size / 2) {
-		dbg_err("bad data_pad");
+		ubi_err("bad data_pad");
 		goto bad;
 	}
 
@@ -816,45 +922,45 @@
 		 * mapped logical eraseblocks.
 		 */
 		if (used_ebs == 0) {
-			dbg_err("zero used_ebs");
+			ubi_err("zero used_ebs");
 			goto bad;
 		}
 		if (data_size == 0) {
-			dbg_err("zero data_size");
+			ubi_err("zero data_size");
 			goto bad;
 		}
 		if (lnum < used_ebs - 1) {
 			if (data_size != usable_leb_size) {
-				dbg_err("bad data_size");
+				ubi_err("bad data_size");
 				goto bad;
 			}
 		} else if (lnum == used_ebs - 1) {
 			if (data_size == 0) {
-				dbg_err("bad data_size at last LEB");
+				ubi_err("bad data_size at last LEB");
 				goto bad;
 			}
 		} else {
-			dbg_err("too high lnum");
+			ubi_err("too high lnum");
 			goto bad;
 		}
 	} else {
 		if (copy_flag == 0) {
 			if (data_crc != 0) {
-				dbg_err("non-zero data CRC");
+				ubi_err("non-zero data CRC");
 				goto bad;
 			}
 			if (data_size != 0) {
-				dbg_err("non-zero data_size");
+				ubi_err("non-zero data_size");
 				goto bad;
 			}
 		} else {
 			if (data_size == 0) {
-				dbg_err("zero data_size of copy");
+				ubi_err("zero data_size of copy");
 				goto bad;
 			}
 		}
 		if (used_ebs != 0) {
-			dbg_err("bad used_ebs");
+			ubi_err("bad used_ebs");
 			goto bad;
 		}
 	}
@@ -863,8 +969,8 @@
 
 bad:
 	ubi_err("bad VID header");
-	ubi_dbg_dump_vid_hdr(vid_hdr);
-	ubi_dbg_dump_stack();
+	ubi_dump_vid_hdr(vid_hdr);
+	dump_stack();
 	return 1;
 }
 
@@ -878,88 +984,53 @@
  *
  * This function reads the volume identifier header from physical eraseblock
  * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
- * volume identifier header. The following codes may be returned:
+ * volume identifier header. The error codes are the same as in
+ * 'ubi_io_read_ec_hdr()'.
  *
- * o %0 if the CRC checksum is correct and the header was successfully read;
- * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
- *   and corrected by the flash driver; this is harmless but may indicate that
- *   this eraseblock may become bad soon;
- * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC
- *   error detected);
- * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
- *   header there);
- * o a negative error code in case of failure.
+ * Note, the implementation of this function is also very similar to
+ * 'ubi_io_read_ec_hdr()', so refer to the comments in 'ubi_io_read_ec_hdr()'.
  */
 int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 			struct ubi_vid_hdr *vid_hdr, int verbose)
 {
-	int err, read_err = 0;
+	int err, read_err;
 	uint32_t crc, magic, hdr_crc;
 	void *p;
 
 	dbg_io("read VID header from PEB %d", pnum);
 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
-	if (UBI_IO_DEBUG)
-		verbose = 1;
 
 	p = (char *)vid_hdr - ubi->vid_hdr_shift;
-	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			  ubi->vid_hdr_alsize);
-	if (err) {
-		if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
-			return err;
-
-		/*
-		 * We read all the data, but either a correctable bit-flip
-		 * occurred, or MTD reported about some data integrity error,
-		 * like an ECC error in case of NAND. The former is harmless,
-		 * the later may mean the read data is corrupted. But we have a
-		 * CRC check-sum and we will identify this. If the VID header is
-		 * still OK, we just report this as there was a bit-flip.
-		 */
-		read_err = err;
-	}
+	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+		return read_err;
 
 	magic = be32_to_cpu(vid_hdr->magic);
 	if (magic != UBI_VID_HDR_MAGIC) {
-		/*
-		 * If we have read all 0xFF bytes, the VID header probably does
-		 * not exist and the physical eraseblock is assumed to be free.
-		 *
-		 * But if there was a read error, we do not test the data for
-		 * 0xFFs. Even if it does contain all 0xFFs, this error
-		 * indicates that something is still wrong with this physical
-		 * eraseblock and it cannot be regarded as free.
-		 */
-		if (read_err != -EBADMSG &&
-		    check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
-			/* The physical eraseblock is supposedly free */
+		if (mtd_is_eccerr(read_err))
+			return UBI_IO_BAD_HDR_EBADMSG;
 
-			/*
-			 * The below is just a paranoid check, it has to be
-			 * compiled out if paranoid checks are disabled.
-			 */
-			err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start,
-						    ubi->leb_size);
-			if (err)
-				return err > 0 ? UBI_IO_BAD_VID_HDR : err;
-
+		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
 			if (verbose)
-				ubi_warn("no VID header found at PEB %d, "
-					 "only 0xFF bytes", pnum);
-			return UBI_IO_PEB_FREE;
+				ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+					 pnum);
+			dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+				pnum);
+			if (!read_err)
+				return UBI_IO_FF;
+			else
+				return UBI_IO_FF_BITFLIPS;
 		}
 
-		/*
-		 * This is not a valid VID header, and these are not 0xFF
-		 * bytes. Report that the header is corrupted.
-		 */
 		if (verbose) {
-			ubi_warn("bad magic number at PEB %d: %08x instead of "
-				 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
-			ubi_dbg_dump_vid_hdr(vid_hdr);
+			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+				 pnum, magic, UBI_VID_HDR_MAGIC);
+			ubi_dump_vid_hdr(vid_hdr);
 		}
-		return UBI_IO_BAD_VID_HDR;
+		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+			pnum, magic, UBI_VID_HDR_MAGIC);
+		return UBI_IO_BAD_HDR;
 	}
 
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
@@ -967,14 +1038,18 @@
 
 	if (hdr_crc != crc) {
 		if (verbose) {
-			ubi_warn("bad CRC at PEB %d, calculated %#08x, "
-				 "read %#08x", pnum, crc, hdr_crc);
-			ubi_dbg_dump_vid_hdr(vid_hdr);
+			ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+				 pnum, crc, hdr_crc);
+			ubi_dump_vid_hdr(vid_hdr);
 		}
-		return UBI_IO_BAD_VID_HDR;
+		dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+			pnum, crc, hdr_crc);
+		if (!read_err)
+			return UBI_IO_BAD_HDR;
+		else
+			return UBI_IO_BAD_HDR_EBADMSG;
 	}
 
-	/* Validate the VID header that we have just read */
 	err = validate_vid_hdr(ubi, vid_hdr);
 	if (err) {
 		ubi_err("validation failed for PEB %d", pnum);
@@ -1009,18 +1084,18 @@
 	dbg_io("write VID header to PEB %d", pnum);
 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
 
-	err = paranoid_check_peb_ec_hdr(ubi, pnum);
+	err = self_check_peb_ec_hdr(ubi, pnum);
 	if (err)
-		return err > 0 ? -EINVAL: err;
+		return err;
 
 	vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
 	vid_hdr->version = UBI_VERSION;
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
 	vid_hdr->hdr_crc = cpu_to_be32(crc);
 
-	err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
 	if (err)
-		return -EINVAL;
+		return err;
 
 	p = (char *)vid_hdr - ubi->vid_hdr_shift;
 	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -1028,44 +1103,48 @@
 	return err;
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
 /**
- * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number to check
  *
- * This function returns zero if the physical eraseblock is good, a positive
- * number if it is bad and a negative error code if an error occurred.
+ * This function returns zero if the physical eraseblock is good, %-EINVAL if
+ * it is bad and a negative error code if an error occurred.
  */
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
 {
 	int err;
 
+	if (!ubi_dbg_chk_io(ubi))
+		return 0;
+
 	err = ubi_io_is_bad(ubi, pnum);
 	if (!err)
 		return err;
 
-	ubi_err("paranoid check failed for PEB %d", pnum);
-	ubi_dbg_dump_stack();
-	return err;
+	ubi_err("self-check failed for PEB %d", pnum);
+	dump_stack();
+	return err > 0 ? -EINVAL : err;
 }
 
 /**
- * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * self_check_ec_hdr - check if an erase counter header is all right.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number the erase counter header belongs to
  * @ec_hdr: the erase counter header to check
  *
  * This function returns zero if the erase counter header contains valid
- * values, and %1 if not.
+ * values, and %-EINVAL if not.
  */
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
-				 const struct ubi_ec_hdr *ec_hdr)
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+			     const struct ubi_ec_hdr *ec_hdr)
 {
 	int err;
 	uint32_t magic;
 
+	if (!ubi_dbg_chk_io(ubi))
+		return 0;
+
 	magic = be32_to_cpu(ec_hdr->magic);
 	if (magic != UBI_EC_HDR_MAGIC) {
 		ubi_err("bad magic %#08x, must be %#08x",
@@ -1075,53 +1154,55 @@
 
 	err = validate_ec_hdr(ubi, ec_hdr);
 	if (err) {
-		ubi_err("paranoid check failed for PEB %d", pnum);
+		ubi_err("self-check failed for PEB %d", pnum);
 		goto fail;
 	}
 
 	return 0;
 
 fail:
-	ubi_dbg_dump_ec_hdr(ec_hdr);
-	ubi_dbg_dump_stack();
-	return 1;
+	ubi_dump_ec_hdr(ec_hdr);
+	dump_stack();
+	return -EINVAL;
 }
 
 /**
- * paranoid_check_peb_ec_hdr - check that the erase counter header of a
- * physical eraseblock is in-place and is all right.
+ * self_check_peb_ec_hdr - check erase counter header.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  *
- * This function returns zero if the erase counter header is all right, %1 if
- * not, and a negative error code if an error occurred.
+ * This function returns zero if the erase counter header is all right, and
+ * a negative error code if not or if an error occurred.
  */
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
 {
 	int err;
 	uint32_t crc, hdr_crc;
 	struct ubi_ec_hdr *ec_hdr;
 
+	if (!ubi_dbg_chk_io(ubi))
+		return 0;
+
 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
 	if (!ec_hdr)
 		return -ENOMEM;
 
 	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
-	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
 		goto exit;
 
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
 	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
 	if (hdr_crc != crc) {
 		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
-		ubi_err("paranoid check failed for PEB %d", pnum);
-		ubi_dbg_dump_ec_hdr(ec_hdr);
-		ubi_dbg_dump_stack();
-		err = 1;
+		ubi_err("self-check failed for PEB %d", pnum);
+		ubi_dump_ec_hdr(ec_hdr);
+		dump_stack();
+		err = -EINVAL;
 		goto exit;
 	}
 
-	err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+	err = self_check_ec_hdr(ubi, pnum, ec_hdr);
 
 exit:
 	kfree(ec_hdr);
@@ -1129,20 +1210,23 @@
 }
 
 /**
- * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * self_check_vid_hdr - check that a volume identifier header is all right.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number the volume identifier header belongs to
  * @vid_hdr: the volume identifier header to check
  *
  * This function returns zero if the volume identifier header is all right, and
- * %1 if not.
+ * %-EINVAL if not.
  */
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
-				  const struct ubi_vid_hdr *vid_hdr)
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+			      const struct ubi_vid_hdr *vid_hdr)
 {
 	int err;
 	uint32_t magic;
 
+	if (!ubi_dbg_chk_io(ubi))
+		return 0;
+
 	magic = be32_to_cpu(vid_hdr->magic);
 	if (magic != UBI_VID_HDR_MAGIC) {
 		ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
@@ -1152,36 +1236,38 @@
 
 	err = validate_vid_hdr(ubi, vid_hdr);
 	if (err) {
-		ubi_err("paranoid check failed for PEB %d", pnum);
+		ubi_err("self-check failed for PEB %d", pnum);
 		goto fail;
 	}
 
 	return err;
 
 fail:
-	ubi_err("paranoid check failed for PEB %d", pnum);
-	ubi_dbg_dump_vid_hdr(vid_hdr);
-	ubi_dbg_dump_stack();
-	return 1;
+	ubi_err("self-check failed for PEB %d", pnum);
+	ubi_dump_vid_hdr(vid_hdr);
+	dump_stack();
+	return -EINVAL;
 
 }
 
 /**
- * paranoid_check_peb_vid_hdr - check that the volume identifier header of a
- * physical eraseblock is in-place and is all right.
+ * self_check_peb_vid_hdr - check volume identifier header.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  *
  * This function returns zero if the volume identifier header is all right,
- * %1 if not, and a negative error code if an error occurred.
+ * and a negative error code if not or if an error occurred.
  */
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
 {
 	int err;
 	uint32_t crc, hdr_crc;
 	struct ubi_vid_hdr *vid_hdr;
 	void *p;
 
+	if (!ubi_dbg_chk_io(ubi))
+		return 0;
+
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
 	if (!vid_hdr)
 		return -ENOMEM;
@@ -1189,22 +1275,22 @@
 	p = (char *)vid_hdr - ubi->vid_hdr_shift;
 	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			  ubi->vid_hdr_alsize);
-	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
 		goto exit;
 
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
 	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
 	if (hdr_crc != crc) {
-		ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
-			"read %#08x", pnum, crc, hdr_crc);
-		ubi_err("paranoid check failed for PEB %d", pnum);
-		ubi_dbg_dump_vid_hdr(vid_hdr);
-		ubi_dbg_dump_stack();
-		err = 1;
+		ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+			pnum, crc, hdr_crc);
+		ubi_err("self-check failed for PEB %d", pnum);
+		ubi_dump_vid_hdr(vid_hdr);
+		dump_stack();
+		err = -EINVAL;
 		goto exit;
 	}
 
-	err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
 
 exit:
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1212,51 +1298,123 @@
 }
 
 /**
- * paranoid_check_all_ff - check that a region of flash is empty.
+ * self_check_write - make sure write succeeded.
+ * @ubi: UBI device description object
+ * @buf: buffer with data which were written
+ * @pnum: physical eraseblock number the data were written to
+ * @offset: offset within the physical eraseblock the data were written to
+ * @len: how many bytes were written
+ *
+ * This function reads data which were recently written and compares them with
+ * the original data buffer - the data have to match. Returns zero if the data
+ * match and a negative error code if not or in case of failure.
+ */
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+			    int offset, int len)
+{
+	int err, i;
+	size_t read;
+	void *buf1;
+	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+	if (!ubi_dbg_chk_io(ubi))
+		return 0;
+
+	buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	if (!buf1) {
+		ubi_err("cannot allocate memory to check writes");
+		return 0;
+	}
+
+	err = mtd_read(ubi->mtd, addr, len, &read, buf1);
+	if (err && !mtd_is_bitflip(err))
+		goto out_free;
+
+	for (i = 0; i < len; i++) {
+		uint8_t c = ((uint8_t *)buf)[i];
+		uint8_t c1 = ((uint8_t *)buf1)[i];
+#if !defined(CONFIG_UBI_SILENCE_MSG)
+		int dump_len = max_t(int, 128, len - i);
+#endif
+
+		if (c == c1)
+			continue;
+
+		ubi_err("self-check failed for PEB %d:%d, len %d",
+			pnum, offset, len);
+		ubi_msg("data differ at position %d", i);
+		ubi_msg("hex dump of the original buffer from %d to %d",
+			i, i + dump_len);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+			       buf + i, dump_len, 1);
+		ubi_msg("hex dump of the read buffer from %d to %d",
+			i, i + dump_len);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+			       buf1 + i, dump_len, 1);
+		dump_stack();
+		err = -EINVAL;
+		goto out_free;
+	}
+
+	vfree(buf1);
+	return 0;
+
+out_free:
+	vfree(buf1);
+	return err;
+}
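+
+/*
+ * A minimal usage sketch (hypothetical caller, the real call site is not
+ * part of this hunk): the write path is expected to verify its own output
+ * along these lines:
+ *
+ *	err = ubi_io_write(ubi, buf, pnum, offset, len);
+ *	if (!err)
+ *		err = self_check_write(ubi, buf, pnum, offset, len);
+ *
+ * Note the check returns 0 early when ubi_dbg_chk_io() is disabled, so it
+ * costs nothing in normal operation.
+ */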
+
+/**
+ * ubi_self_check_all_ff - check that a region of flash is empty.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  * @offset: the starting offset within the physical eraseblock to check
  * @len: the length of the region to check
  *
  * This function returns zero if only 0xFF bytes are present at offset
- * @offset of the physical eraseblock @pnum, %1 if not, and a negative error
- * code if an error occurred.
+ * @offset of the physical eraseblock @pnum, and a negative error code if not
+ * or if an error occurred.
  */
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
-				 int len)
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
 {
 	size_t read;
 	int err;
+	void *buf;
 	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
 
-	mutex_lock(&ubi->dbg_buf_mutex);
-	err = mtd_read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
-	if (err && err != -EUCLEAN) {
-		ubi_err("error %d while reading %d bytes from PEB %d:%d, "
-			"read %zd bytes", err, len, pnum, offset, read);
+	if (!ubi_dbg_chk_io(ubi))
+		return 0;
+
+	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	if (!buf) {
+		ubi_err("cannot allocate memory to check for 0xFFs");
+		return 0;
+	}
+
+	err = mtd_read(ubi->mtd, addr, len, &read, buf);
+	if (err && !mtd_is_bitflip(err)) {
+		ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+			err, len, pnum, offset, read);
 		goto error;
 	}
 
-	err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
+	err = ubi_check_pattern(buf, 0xFF, len);
 	if (err == 0) {
-		ubi_err("flash region at PEB %d:%d, length %d does not "
-			"contain all 0xFF bytes", pnum, offset, len);
+		ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+			pnum, offset, len);
 		goto fail;
 	}
-	mutex_unlock(&ubi->dbg_buf_mutex);
 
+	vfree(buf);
 	return 0;
 
 fail:
-	ubi_err("paranoid check failed for PEB %d", pnum);
-	dbg_msg("hex dump of the %d-%d region", offset, offset + len);
-	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
-		       ubi->dbg_peb_buf, len, 1);
-	err = 1;
+	ubi_err("self-check failed for PEB %d", pnum);
+	ubi_msg("hex dump of the %d-%d region", offset, offset + len);
+	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+	err = -EINVAL;
 error:
-	ubi_dbg_dump_stack();
-	mutex_unlock(&ubi->dbg_buf_mutex);
+	dump_stack();
+	vfree(buf);
 	return err;
 }
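+
+/*
+ * A usage sketch (illustrative): erase paths can assert that a freshly
+ * erased PEB really contains only 0xFF bytes, e.g.:
+ *
+ *	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ *	if (err)
+ *		return err;
+ *
+ * Like the other self-checks, this is a no-op unless ubi_dbg_chk_io() is
+ * enabled.
+ */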
-
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 63c56c9..0183c93 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -8,16 +8,43 @@
 
 /* This file mostly implements UBI kernel API functions */
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/module.h>
-#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
 #include <asm/div64.h>
-#endif
-
+#else
 #include <ubi_uboot.h>
+#endif
+#include <linux/err.h>
+
 #include "ubi.h"
 
 /**
+ * ubi_do_get_device_info - get information about UBI device.
+ * @ubi: UBI device description object
+ * @di: the information is stored here
+ *
+ * This function is the same as 'ubi_get_device_info()', but it assumes the UBI
+ * device is locked and cannot disappear.
+ */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
+{
+	di->ubi_num = ubi->ubi_num;
+	di->leb_size = ubi->leb_size;
+	di->leb_start = ubi->leb_start;
+	di->min_io_size = ubi->min_io_size;
+	di->max_write_size = ubi->max_write_size;
+	di->ro_mode = ubi->ro_mode;
+#ifndef __UBOOT__
+	di->cdev = ubi->cdev.dev;
+#endif
+}
+EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
+
+/**
  * ubi_get_device_info - get information about UBI device.
  * @ubi_num: UBI device number
  * @di: the information is stored here
@@ -31,33 +58,24 @@
 
 	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
 		return -EINVAL;
-
 	ubi = ubi_get_device(ubi_num);
 	if (!ubi)
 		return -ENODEV;
-
-	di->ubi_num = ubi->ubi_num;
-	di->leb_size = ubi->leb_size;
-	di->min_io_size = ubi->min_io_size;
-	di->ro_mode = ubi->ro_mode;
-	di->cdev = ubi->cdev.dev;
-
+	ubi_do_get_device_info(ubi, di);
 	ubi_put_device(ubi);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ubi_get_device_info);
 
 /**
- * ubi_get_volume_info - get information about UBI volume.
- * @desc: volume descriptor
+ * ubi_do_get_volume_info - get information about UBI volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
  * @vi: the information is stored here
  */
-void ubi_get_volume_info(struct ubi_volume_desc *desc,
-			 struct ubi_volume_info *vi)
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+			    struct ubi_volume_info *vi)
 {
-	const struct ubi_volume *vol = desc->vol;
-	const struct ubi_device *ubi = vol->ubi;
-
 	vi->vol_id = vol->vol_id;
 	vi->ubi_num = ubi->ubi_num;
 	vi->size = vol->reserved_pebs;
@@ -71,6 +89,17 @@
 	vi->name = vol->name;
 	vi->cdev = vol->cdev.dev;
 }
+
+/**
+ * ubi_get_volume_info - get information about UBI volume.
+ * @desc: volume descriptor
+ * @vi: the information is stored here
+ */
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+			 struct ubi_volume_info *vi)
+{
+	ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
+}
 EXPORT_SYMBOL_GPL(ubi_get_volume_info);
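+
+/*
+ * A minimal usage sketch (hypothetical caller): a client typically pairs
+ * this with ubi_open_volume()/ubi_close_volume():
+ *
+ *	struct ubi_volume_info vi;
+ *	struct ubi_volume_desc *desc;
+ *
+ *	desc = ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *	ubi_get_volume_info(desc, &vi);
+ *	ubi_close_volume(desc);
+ *
+ * after which vi.size, vi.usable_leb_size and vi.name describe the volume.
+ */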
 
 /**
@@ -98,7 +127,7 @@
 	struct ubi_device *ubi;
 	struct ubi_volume *vol;
 
-	dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
+	dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
 
 	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
 		return ERR_PTR(-EINVAL);
@@ -188,6 +217,8 @@
 	kfree(desc);
 out_put_ubi:
 	ubi_put_device(ubi);
+	ubi_err("cannot open device %d, volume %d, error %d",
+		ubi_num, vol_id, err);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -207,7 +238,7 @@
 	struct ubi_device *ubi;
 	struct ubi_volume_desc *ret;
 
-	dbg_msg("open volume %s, mode %d", name, mode);
+	dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
 
 	if (!name)
 		return ERR_PTR(-EINVAL);
@@ -249,6 +280,45 @@
 }
 EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
 
+#ifndef __UBOOT__
+/**
+ * ubi_open_volume_path - open UBI volume by its character device node path.
+ * @pathname: volume character device node path
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by the path
+ * to its character device node.
+ */
+struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
+{
+	int error, ubi_num, vol_id, mod;
+	struct inode *inode;
+	struct path path;
+
+	dbg_gen("open volume %s, mode %d", pathname, mode);
+
+	if (!pathname || !*pathname)
+		return ERR_PTR(-EINVAL);
+
+	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+	if (error)
+		return ERR_PTR(error);
+
+	inode = path.dentry->d_inode;
+	mod = inode->i_mode;
+	ubi_num = ubi_major2num(imajor(inode));
+	vol_id = iminor(inode) - 1;
+	path_put(&path);
+
+	if (!S_ISCHR(mod))
+		return ERR_PTR(-EINVAL);
+	if (vol_id >= 0 && ubi_num >= 0)
+		return ubi_open_volume(ubi_num, vol_id, mode);
+	return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_path);
+#endif
+
 /**
  * ubi_close_volume - close UBI volume.
  * @desc: volume descriptor
@@ -258,7 +328,8 @@
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 
-	dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
+	dbg_gen("close device %d, volume %d, mode %d",
+		ubi->ubi_num, vol->vol_id, desc->mode);
 
 	spin_lock(&ubi->volumes_lock);
 	switch (desc->mode) {
@@ -315,7 +386,7 @@
 	struct ubi_device *ubi = vol->ubi;
 	int err, vol_id = vol->vol_id;
 
-	dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+	dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
 
 	if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
 	    lnum >= vol->used_ebs || offset < 0 || len < 0 ||
@@ -353,11 +424,9 @@
  * @buf: data to write
  * @offset: offset within the logical eraseblock where to write
  * @len: how many bytes to write
- * @dtype: expected data type
  *
  * This function writes @len bytes of data from @buf to offset @offset of
- * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
- * the data.
+ * logical eraseblock @lnum.
  *
 * This function takes care of physical eraseblock write failures. If the write
 * to the physical eraseblock fails, the logical eraseblock is
@@ -374,13 +443,13 @@
  * returns immediately with %-EBADF code.
  */
 int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
-		  int offset, int len, int dtype)
+		  int offset, int len)
 {
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 	int vol_id = vol->vol_id;
 
-	dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
+	dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
 
 	if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
 		return -EINVAL;
@@ -393,17 +462,13 @@
 	    offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
 		return -EINVAL;
 
-	if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
-	    dtype != UBI_UNKNOWN)
-		return -EINVAL;
-
 	if (vol->upd_marker)
 		return -EBADF;
 
 	if (len == 0)
 		return 0;
 
-	return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+	return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_write);
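+
+/*
+ * Porting note with an illustrative call (hypothetical caller): since the
+ * dtype hint is gone from the API, existing users change along these lines:
+ *
+ *	before:	ubi_leb_write(desc, lnum, buf, 0, len, UBI_UNKNOWN);
+ *	after:	ubi_leb_write(desc, lnum, buf, 0, len);
+ *
+ * The same argument removal applies to ubi_leb_change() and ubi_leb_map()
+ * below.
+ */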
 
@@ -413,24 +478,23 @@
  * @lnum: logical eraseblock number to change
  * @buf: data to write
  * @len: how many bytes to write
- * @dtype: expected data type
  *
  * This function changes the contents of a logical eraseblock atomically. @buf
  * has to contain new logical eraseblock data, and @len - the length of the
- * data, which has to be aligned. The length may be shorter then the logical
+ * data, which has to be aligned. The length may be shorter than the logical
 * eraseblock size, and the logical eraseblock may be appended to more times
  * later on. This function guarantees that in case of an unclean reboot the old
  * contents is preserved. Returns zero in case of success and a negative error
  * code in case of failure.
  */
 int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
-		   int len, int dtype)
+		   int len)
 {
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 	int vol_id = vol->vol_id;
 
-	dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+	dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
 
 	if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
 		return -EINVAL;
@@ -442,17 +506,13 @@
 	    len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
 		return -EINVAL;
 
-	if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
-	    dtype != UBI_UNKNOWN)
-		return -EINVAL;
-
 	if (vol->upd_marker)
 		return -EBADF;
 
 	if (len == 0)
 		return 0;
 
-	return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+	return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_change);
 
@@ -474,7 +534,7 @@
 	struct ubi_device *ubi = vol->ubi;
 	int err;
 
-	dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
+	dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
 
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
@@ -489,7 +549,7 @@
 	if (err)
 		return err;
 
-	return ubi_wl_flush(ubi);
+	return ubi_wl_flush(ubi, vol->vol_id, lnum);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_erase);
 
@@ -500,7 +560,7 @@
  *
  * This function un-maps logical eraseblock @lnum and schedules the
  * corresponding physical eraseblock for erasure, so that it will eventually be
- * physically erased in background. This operation is much faster then the
+ * physically erased in background. This operation is much faster than the
  * erase operation.
  *
  * Unlike erase, the un-map operation does not guarantee that the logical
@@ -519,7 +579,7 @@
  *
  * The main and obvious use-case of this function is when the contents of a
  * logical eraseblock has to be re-written. Then it is much more efficient to
- * first un-map it, then write new data, rather then first erase it, then write
+ * first un-map it, then write new data, rather than first erase it, then write
  * new data. Note, once new data has been written to the logical eraseblock,
  * UBI guarantees that the old contents has gone forever. In other words, if an
  * unclean reboot happens after the logical eraseblock has been un-mapped and
@@ -534,7 +594,7 @@
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 
-	dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
+	dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
 
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
@@ -550,13 +610,12 @@
 EXPORT_SYMBOL_GPL(ubi_leb_unmap);
 
 /**
- * ubi_leb_map - map logical erasblock to a physical eraseblock.
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
  * @desc: volume descriptor
  * @lnum: logical eraseblock number
- * @dtype: expected data type
  *
  * This function maps an un-mapped logical eraseblock @lnum to a physical
- * eraseblock. This means, that after a successfull invocation of this
+ * eraseblock. This means that after a successful invocation of this
  * function the logical eraseblock @lnum will be empty (contain only %0xFF
  * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
  * happens.
@@ -566,12 +625,12 @@
  * eraseblock is already mapped, and other negative error codes in case of
  * other failures.
  */
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
 {
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 
-	dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
+	dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
 
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
@@ -579,17 +638,13 @@
 	if (lnum < 0 || lnum >= vol->reserved_pebs)
 		return -EINVAL;
 
-	if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
-	    dtype != UBI_UNKNOWN)
-		return -EINVAL;
-
 	if (vol->upd_marker)
 		return -EBADF;
 
 	if (vol->eba_tbl[lnum] >= 0)
 		return -EBADMSG;
 
-	return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+	return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_map);
 
@@ -613,7 +668,7 @@
 {
 	struct ubi_volume *vol = desc->vol;
 
-	dbg_msg("test LEB %d:%d", vol->vol_id, lnum);
+	dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
 
 	if (lnum < 0 || lnum >= vol->reserved_pebs)
 		return -EINVAL;
@@ -624,3 +679,110 @@
 	return vol->eba_tbl[lnum] >= 0;
 }
 EXPORT_SYMBOL_GPL(ubi_is_mapped);
+
+/**
+ * ubi_sync - synchronize UBI device buffers.
+ * @ubi_num: UBI device to synchronize
+ *
+ * The underlying MTD device may cache data in hardware or in software. This
+ * function ensures the caches are flushed. Returns zero in case of success and
+ * a negative error code in case of failure.
+ */
+int ubi_sync(int ubi_num)
+{
+	struct ubi_device *ubi;
+
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
+		return -ENODEV;
+
+	mtd_sync(ubi->mtd);
+	ubi_put_device(ubi);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_sync);
+
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending work for a particular volume ID / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+	struct ubi_device *ubi;
+	int err = 0;
+
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
+		return -ENODEV;
+
+	err = ubi_wl_flush(ubi, vol_id, lnum);
+	ubi_put_device(ubi);
+	return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
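+
+/*
+ * A minimal usage sketch (hypothetical): flush every pending work item on
+ * UBI device 0, with %UBI_ALL acting as a wildcard for both the volume ID
+ * and the LEB number:
+ *
+ *	err = ubi_flush(0, UBI_ALL, UBI_ALL);
+ */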
+
+#ifndef __UBOOT__
+BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
+
+/**
+ * ubi_register_volume_notifier - register a volume notifier.
+ * @nb: the notifier description object
+ * @ignore_existing: if non-zero, do not send "added" notification for all
+ *                   already existing volumes
+ *
+ * This function registers a volume notifier, which means that
+ * 'nb->notifier_call()' will be invoked when an UBI  volume is created,
+ * removed, re-sized, re-named, or updated. The first argument of the function
+ * is the notification type. The second argument is pointer to a
+ * &struct ubi_notification object which describes the notification event.
+ * Using UBI API from the volume notifier is prohibited.
+ *
+ * This function returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_register_volume_notifier(struct notifier_block *nb,
+				 int ignore_existing)
+{
+	int err;
+
+	err = blocking_notifier_chain_register(&ubi_notifiers, nb);
+	if (err != 0)
+		return err;
+	if (ignore_existing)
+		return 0;
+
+	/*
+	 * We are going to walk all UBI devices and all volumes, and
+	 * notify the user about existing volumes by the %UBI_VOLUME_ADDED
+	 * event. We have to lock the @ubi_devices_mutex to make sure UBI
+	 * devices do not disappear.
+	 */
+	mutex_lock(&ubi_devices_mutex);
+	ubi_enumerate_volumes(nb);
+	mutex_unlock(&ubi_devices_mutex);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
+
+/**
+ * ubi_unregister_volume_notifier - unregister the volume notifier.
+ * @nb: the notifier description object
+ *
+ * This function unregisters volume notifier @nb and returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_unregister_volume_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
+}
+EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
+#endif
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 5ff55b4..49530b7 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -81,14 +81,62 @@
 }
 
 /**
- * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between current number of PEBs reserved for
+ * bad eraseblock handling and the required level of PEBs that must be
+ * reserved, and if necessary, reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+	int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+	if (need <= 0 || ubi->avail_pebs == 0)
+		return;
+
+	need = min_t(int, need, ubi->avail_pebs);
+	ubi->avail_pebs -= need;
+	ubi->rsvd_pebs += need;
+	ubi->beb_rsvd_pebs += need;
+	ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
  * eraseblock handling.
  * @ubi: UBI device description object
  */
 void ubi_calculate_reserved(struct ubi_device *ubi)
 {
-	ubi->beb_rsvd_level = ubi->good_peb_count/100;
-	ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
-	if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
-		ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+	/*
+	 * Calculate the actual number of PEBs currently needed to be reserved
+	 * for future bad eraseblock handling.
+	 */
+	ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+	if (ubi->beb_rsvd_level < 0) {
+		ubi->beb_rsvd_level = 0;
+		ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+			 ubi->bad_peb_count, ubi->bad_peb_limit);
+	}
+}
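+
+/*
+ * A worked example (illustrative numbers): with bad_peb_limit = 20 and
+ * bad_peb_count = 3, beb_rsvd_level becomes 17. Once bad_peb_count grows
+ * past the limit, the level is clamped to 0 and the warning above is
+ * printed instead.
+ */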
+
+/**
+ * ubi_check_pattern - check if buffer contains only a certain byte pattern.
+ * @buf: buffer to check
+ * @patt: the pattern to check
+ * @size: buffer size in bytes
+ *
+ * This function returns %1 if there are only @patt bytes in @buf, and %0 if
+ * something else was also found.
+ */
+int ubi_check_pattern(const void *buf, uint8_t patt, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		if (((const uint8_t *)buf)[i] != patt)
+			return 0;
+	return 1;
 }
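+
+/*
+ * A usage sketch mirroring the io.c hunks in this same patch: the I/O
+ * layer uses this helper to classify a PEB whose header magic is wrong as
+ * empty rather than corrupted:
+ *
+ *	if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE))
+ *		return read_err ? UBI_IO_FF_BITFLIPS : UBI_IO_FF;
+ */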
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
deleted file mode 100644
index a6d0fbc..0000000
--- a/drivers/mtd/ubi/scan.c
+++ /dev/null
@@ -1,1348 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * SPDX-License-Identifier:	GPL-2.0+
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-/*
- * UBI scanning unit.
- *
- * This unit is responsible for scanning the flash media, checking UBI
- * headers and providing complete information about the UBI flash image.
- *
- * The scanning information is represented by a &struct ubi_scan_info' object.
- * Information about found volumes is represented by &struct ubi_scan_volume
- * objects which are kept in volume RB-tree with root at the @volumes field.
- * The RB-tree is indexed by the volume ID.
- *
- * Found logical eraseblocks are represented by &struct ubi_scan_leb objects.
- * These objects are kept in per-volume RB-trees with the root at the
- * corresponding &struct ubi_scan_volume object. To put it differently, we keep
- * an RB-tree of per-volume objects and each of these objects is the root of
- * RB-tree of per-eraseblock objects.
- *
- * Corrupted physical eraseblocks are put to the @corr list, free physical
- * eraseblocks are put to the @free list and the physical eraseblock to be
- * erased are put to the @erase list.
- */
-
-#ifdef UBI_LINUX
-#include <linux/err.h>
-#include <linux/crc32.h>
-#include <asm/div64.h>
-#endif
-
-#include <ubi_uboot.h>
-#include "ubi.h"
-
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
-#else
-#define paranoid_check_si(ubi, si) 0
-#endif
-
-/* Temporary variables used during scanning */
-static struct ubi_ec_hdr *ech;
-static struct ubi_vid_hdr *vidh;
-
-/**
- * add_to_list - add physical eraseblock to a list.
- * @si: scanning information
- * @pnum: physical eraseblock number to add
- * @ec: erase counter of the physical eraseblock
- * @list: the list to add to
- *
- * This function adds physical eraseblock @pnum to free, erase, corrupted or
- * alien lists. Returns zero in case of success and a negative error code in
- * case of failure.
- */
-static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
-		       struct list_head *list)
-{
-	struct ubi_scan_leb *seb;
-
-	if (list == &si->free)
-		dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
-	else if (list == &si->erase)
-		dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
-	else if (list == &si->corr)
-		dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
-	else if (list == &si->alien)
-		dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
-	else
-		BUG();
-
-	seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
-	if (!seb)
-		return -ENOMEM;
-
-	seb->pnum = pnum;
-	seb->ec = ec;
-	list_add_tail(&seb->u.list, list);
-	return 0;
-}
-
-/**
- * validate_vid_hdr - check that volume identifier header is correct and
- * consistent.
- * @vid_hdr: the volume identifier header to check
- * @sv: information about the volume this logical eraseblock belongs to
- * @pnum: physical eraseblock number the VID header came from
- *
- * This function checks that data stored in @vid_hdr is consistent. Returns
- * non-zero if an inconsistency was found and zero if not.
- *
- * Note, UBI does sanity check of everything it reads from the flash media.
- * Most of the checks are done in the I/O unit. Here we check that the
- * information in the VID header is consistent to the information in other VID
- * headers of the same volume.
- */
-static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
-			    const struct ubi_scan_volume *sv, int pnum)
-{
-	int vol_type = vid_hdr->vol_type;
-	int vol_id = be32_to_cpu(vid_hdr->vol_id);
-	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
-	int data_pad = be32_to_cpu(vid_hdr->data_pad);
-
-	if (sv->leb_count != 0) {
-		int sv_vol_type;
-
-		/*
-		 * This is not the first logical eraseblock belonging to this
-		 * volume. Ensure that the data in its VID header is consistent
-		 * to the data in previous logical eraseblock headers.
-		 */
-
-		if (vol_id != sv->vol_id) {
-			dbg_err("inconsistent vol_id");
-			goto bad;
-		}
-
-		if (sv->vol_type == UBI_STATIC_VOLUME)
-			sv_vol_type = UBI_VID_STATIC;
-		else
-			sv_vol_type = UBI_VID_DYNAMIC;
-
-		if (vol_type != sv_vol_type) {
-			dbg_err("inconsistent vol_type");
-			goto bad;
-		}
-
-		if (used_ebs != sv->used_ebs) {
-			dbg_err("inconsistent used_ebs");
-			goto bad;
-		}
-
-		if (data_pad != sv->data_pad) {
-			dbg_err("inconsistent data_pad");
-			goto bad;
-		}
-	}
-
-	return 0;
-
-bad:
-	ubi_err("inconsistent VID header at PEB %d", pnum);
-	ubi_dbg_dump_vid_hdr(vid_hdr);
-	ubi_dbg_dump_sv(sv);
-	return -EINVAL;
-}
-
-/**
- * add_volume - add volume to the scanning information.
- * @si: scanning information
- * @vol_id: ID of the volume to add
- * @pnum: physical eraseblock number
- * @vid_hdr: volume identifier header
- *
- * If the volume corresponding to the @vid_hdr logical eraseblock is already
- * present in the scanning information, this function does nothing. Otherwise
- * it adds corresponding volume to the scanning information. Returns a pointer
- * to the scanning volume object in case of success and a negative error code
- * in case of failure.
- */
-static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
-					  int pnum,
-					  const struct ubi_vid_hdr *vid_hdr)
-{
-	struct ubi_scan_volume *sv;
-	struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
-
-	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
-
-	/* Walk the volume RB-tree to look if this volume is already present */
-	while (*p) {
-		parent = *p;
-		sv = rb_entry(parent, struct ubi_scan_volume, rb);
-
-		if (vol_id == sv->vol_id)
-			return sv;
-
-		if (vol_id > sv->vol_id)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	/* The volume is absent - add it */
-	sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
-	if (!sv)
-		return ERR_PTR(-ENOMEM);
-
-	sv->highest_lnum = sv->leb_count = 0;
-	sv->vol_id = vol_id;
-	sv->root = RB_ROOT;
-	sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
-	sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
-	sv->compat = vid_hdr->compat;
-	sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
-							    : UBI_STATIC_VOLUME;
-	if (vol_id > si->highest_vol_id)
-		si->highest_vol_id = vol_id;
-
-	rb_link_node(&sv->rb, parent, p);
-	rb_insert_color(&sv->rb, &si->volumes);
-	si->vols_found += 1;
-	dbg_bld("added volume %d", vol_id);
-	return sv;
-}
-
-/**
- * compare_lebs - find out which logical eraseblock is newer.
- * @ubi: UBI device description object
- * @seb: first logical eraseblock to compare
- * @pnum: physical eraseblock number of the second logical eraseblock to
- * compare
- * @vid_hdr: volume identifier header of the second logical eraseblock
- *
- * This function compares 2 copies of a LEB and informs which one is newer. In
- * case of success this function returns a positive value, in case of failure, a
- * negative error code is returned. The success return codes use the following
- * bits:
- *     o bit 0 is cleared: the first PEB (described by @seb) is newer then the
- *       second PEB (described by @pnum and @vid_hdr);
- *     o bit 0 is set: the second PEB is newer;
- *     o bit 1 is cleared: no bit-flips were detected in the newer LEB;
- *     o bit 1 is set: bit-flips were detected in the newer LEB;
- *     o bit 2 is cleared: the older LEB is not corrupted;
- *     o bit 2 is set: the older LEB is corrupted.
- */
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
-			int pnum, const struct ubi_vid_hdr *vid_hdr)
-{
-	void *buf;
-	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
-	uint32_t data_crc, crc;
-	struct ubi_vid_hdr *vh = NULL;
-	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
-
-	if (seb->sqnum == 0 && sqnum2 == 0) {
-		long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
-
-		/*
-		 * UBI constantly increases the logical eraseblock version
-		 * number and it can overflow. Thus, we have to bear in mind
-		 * that versions that are close to %0xFFFFFFFF are less then
-		 * versions that are close to %0.
-		 *
-		 * The UBI WL unit guarantees that the number of pending tasks
-		 * is not greater then %0x7FFFFFFF. So, if the difference
-		 * between any two versions is greater or equivalent to
-		 * %0x7FFFFFFF, there was an overflow and the logical
-		 * eraseblock with lower version is actually newer then the one
-		 * with higher version.
-		 *
-		 * FIXME: but this is anyway obsolete and will be removed at
-		 * some point.
-		 */
-		dbg_bld("using old crappy leb_ver stuff");
-
-		if (v1 == v2) {
-			ubi_err("PEB %d and PEB %d have the same version %lld",
-				seb->pnum, pnum, v1);
-			return -EINVAL;
-		}
-
-		abs = v1 - v2;
-		if (abs < 0)
-			abs = -abs;
-
-		if (abs < 0x7FFFFFFF)
-			/* Non-overflow situation */
-			second_is_newer = (v2 > v1);
-		else
-			second_is_newer = (v2 < v1);
-	} else
-		/* Obviously the LEB with lower sequence counter is older */
-		second_is_newer = sqnum2 > seb->sqnum;
-
-	/*
-	 * Now we know which copy is newer. If the copy flag of the PEB with
-	 * newer version is not set, then we just return, otherwise we have to
-	 * check data CRC. For the second PEB we already have the VID header,
-	 * for the first one - we'll need to re-read it from flash.
-	 *
-	 * FIXME: this may be optimized so that we wouldn't read twice.
-	 */
-
-	if (second_is_newer) {
-		if (!vid_hdr->copy_flag) {
-			/* It is not a copy, so it is newer */
-			dbg_bld("second PEB %d is newer, copy_flag is unset",
-				pnum);
-			return 1;
-		}
-	} else {
-		pnum = seb->pnum;
-
-		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-		if (!vh)
-			return -ENOMEM;
-
-		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
-		if (err) {
-			if (err == UBI_IO_BITFLIPS)
-				bitflips = 1;
-			else {
-				dbg_err("VID of PEB %d header is bad, but it "
-					"was OK earlier", pnum);
-				if (err > 0)
-					err = -EIO;
-
-				goto out_free_vidh;
-			}
-		}
-
-		if (!vh->copy_flag) {
-			/* It is not a copy, so it is newer */
-			dbg_bld("first PEB %d is newer, copy_flag is unset",
-				pnum);
-			err = bitflips << 1;
-			goto out_free_vidh;
-		}
-
-		vid_hdr = vh;
-	}
-
-	/* Read the data of the copy and check the CRC */
-
-	len = be32_to_cpu(vid_hdr->data_size);
-	buf = vmalloc(len);
-	if (!buf) {
-		err = -ENOMEM;
-		goto out_free_vidh;
-	}
-
-	err = ubi_io_read_data(ubi, buf, pnum, 0, len);
-	if (err && err != UBI_IO_BITFLIPS)
-		goto out_free_buf;
-
-	data_crc = be32_to_cpu(vid_hdr->data_crc);
-	crc = crc32(UBI_CRC32_INIT, buf, len);
-	if (crc != data_crc) {
-		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
-			pnum, crc, data_crc);
-		corrupted = 1;
-		bitflips = 0;
-		second_is_newer = !second_is_newer;
-	} else {
-		dbg_bld("PEB %d CRC is OK", pnum);
-		bitflips = !!err;
-	}
-
-	vfree(buf);
-	ubi_free_vid_hdr(ubi, vh);
-
-	if (second_is_newer)
-		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
-	else
-		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
-
-	return second_is_newer | (bitflips << 1) | (corrupted << 2);
-
-out_free_buf:
-	vfree(buf);
-out_free_vidh:
-	ubi_free_vid_hdr(ubi, vh);
-	return err;
-}
-
-/**
- * ubi_scan_add_used - add information about a physical eraseblock to the
- * scanning information.
- * @ubi: UBI device description object
- * @si: scanning information
- * @pnum: the physical eraseblock number
- * @ec: erase counter
- * @vid_hdr: the volume identifier header
- * @bitflips: if bit-flips were detected when this physical eraseblock was read
- *
- * This function adds information about a used physical eraseblock to the
- * 'used' tree of the corresponding volume. The function is rather complex
- * because it has to handle cases when this is not the first physical
- * eraseblock belonging to the same logical eraseblock, and the newer one has
- * to be picked, while the older one has to be dropped. This function returns
- * zero in case of success and a negative error code in case of failure.
- */
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
-		      int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
-		      int bitflips)
-{
-	int err, vol_id, lnum;
-	uint32_t leb_ver;
-	unsigned long long sqnum;
-	struct ubi_scan_volume *sv;
-	struct ubi_scan_leb *seb;
-	struct rb_node **p, *parent = NULL;
-
-	vol_id = be32_to_cpu(vid_hdr->vol_id);
-	lnum = be32_to_cpu(vid_hdr->lnum);
-	sqnum = be64_to_cpu(vid_hdr->sqnum);
-	leb_ver = be32_to_cpu(vid_hdr->leb_ver);
-
-	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d",
-		pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips);
-
-	sv = add_volume(si, vol_id, pnum, vid_hdr);
-	if (IS_ERR(sv))
-		return PTR_ERR(sv);
-
-	if (si->max_sqnum < sqnum)
-		si->max_sqnum = sqnum;
-
-	/*
-	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
-	 * if this is the first instance of this logical eraseblock or not.
-	 */
-	p = &sv->root.rb_node;
-	while (*p) {
-		int cmp_res;
-
-		parent = *p;
-		seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
-		if (lnum != seb->lnum) {
-			if (lnum < seb->lnum)
-				p = &(*p)->rb_left;
-			else
-				p = &(*p)->rb_right;
-			continue;
-		}
-
-		/*
-		 * There is already a physical eraseblock describing the same
-		 * logical eraseblock present.
-		 */
-
-		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
-			"LEB ver %u, EC %d", seb->pnum, seb->sqnum,
-			seb->leb_ver, seb->ec);
-
-		/*
-		 * Make sure that the logical eraseblocks have different
-		 * versions. Otherwise the image is bad.
-		 */
-		if (seb->leb_ver == leb_ver && leb_ver != 0) {
-			ubi_err("two LEBs with same version %u", leb_ver);
-			ubi_dbg_dump_seb(seb, 0);
-			ubi_dbg_dump_vid_hdr(vid_hdr);
-			return -EINVAL;
-		}
-
-		/*
-		 * Make sure that the logical eraseblocks have different
-		 * sequence numbers. Otherwise the image is bad.
-		 *
-		 * FIXME: remove 'sqnum != 0' check when leb_ver is removed.
-		 */
-		if (seb->sqnum == sqnum && sqnum != 0) {
-			ubi_err("two LEBs with same sequence number %llu",
-				sqnum);
-			ubi_dbg_dump_seb(seb, 0);
-			ubi_dbg_dump_vid_hdr(vid_hdr);
-			return -EINVAL;
-		}
-
-		/*
-		 * Now we have to drop the older one and preserve the newer
-		 * one.
-		 */
-		cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
-		if (cmp_res < 0)
-			return cmp_res;
-
-		if (cmp_res & 1) {
-			/*
-			 * This logical eraseblock is newer than the one
-			 * found earlier.
-			 */
-			err = validate_vid_hdr(vid_hdr, sv, pnum);
-			if (err)
-				return err;
-
-			if (cmp_res & 4)
-				err = add_to_list(si, seb->pnum, seb->ec,
-						  &si->corr);
-			else
-				err = add_to_list(si, seb->pnum, seb->ec,
-						  &si->erase);
-			if (err)
-				return err;
-
-			seb->ec = ec;
-			seb->pnum = pnum;
-			seb->scrub = ((cmp_res & 2) || bitflips);
-			seb->sqnum = sqnum;
-			seb->leb_ver = leb_ver;
-
-			if (sv->highest_lnum == lnum)
-				sv->last_data_size =
-					be32_to_cpu(vid_hdr->data_size);
-
-			return 0;
-		} else {
-			/*
-			 * This logical eraseblock is older than the one found
-			 * previously.
-			 */
-			if (cmp_res & 4)
-				return add_to_list(si, pnum, ec, &si->corr);
-			else
-				return add_to_list(si, pnum, ec, &si->erase);
-		}
-	}
-
-	/*
-	 * We've met this logical eraseblock for the first time, add it to the
-	 * scanning information.
-	 */
-
-	err = validate_vid_hdr(vid_hdr, sv, pnum);
-	if (err)
-		return err;
-
-	seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
-	if (!seb)
-		return -ENOMEM;
-
-	seb->ec = ec;
-	seb->pnum = pnum;
-	seb->lnum = lnum;
-	seb->sqnum = sqnum;
-	seb->scrub = bitflips;
-	seb->leb_ver = leb_ver;
-
-	if (sv->highest_lnum <= lnum) {
-		sv->highest_lnum = lnum;
-		sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
-	}
-
-	sv->leb_count += 1;
-	rb_link_node(&seb->u.rb, parent, p);
-	rb_insert_color(&seb->u.rb, &sv->root);
-	return 0;
-}
-
-/**
- * ubi_scan_find_sv - find information about a particular volume in the
- * scanning information.
- * @si: scanning information
- * @vol_id: the requested volume ID
- *
- * This function returns a pointer to the volume description or %NULL if there
- * are no data about this volume in the scanning information.
- */
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
-					 int vol_id)
-{
-	struct ubi_scan_volume *sv;
-	struct rb_node *p = si->volumes.rb_node;
-
-	while (p) {
-		sv = rb_entry(p, struct ubi_scan_volume, rb);
-
-		if (vol_id == sv->vol_id)
-			return sv;
-
-		if (vol_id > sv->vol_id)
-			p = p->rb_left;
-		else
-			p = p->rb_right;
-	}
-
-	return NULL;
-}
-
-/**
- * ubi_scan_find_seb - find information about a particular logical
- * eraseblock in the volume scanning information.
- * @sv: a pointer to the volume scanning information
- * @lnum: the requested logical eraseblock
- *
- * This function returns a pointer to the scanning logical eraseblock or %NULL
- * if there are no data about it in the scanning volume information.
- */
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
-				       int lnum)
-{
-	struct ubi_scan_leb *seb;
-	struct rb_node *p = sv->root.rb_node;
-
-	while (p) {
-		seb = rb_entry(p, struct ubi_scan_leb, u.rb);
-
-		if (lnum == seb->lnum)
-			return seb;
-
-		if (lnum > seb->lnum)
-			p = p->rb_left;
-		else
-			p = p->rb_right;
-	}
-
-	return NULL;
-}
-
-/**
- * ubi_scan_rm_volume - delete scanning information about a volume.
- * @si: scanning information
- * @sv: the volume scanning information to delete
- */
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
-{
-	struct rb_node *rb;
-	struct ubi_scan_leb *seb;
-
-	dbg_bld("remove scanning information about volume %d", sv->vol_id);
-
-	while ((rb = rb_first(&sv->root))) {
-		seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
-		rb_erase(&seb->u.rb, &sv->root);
-		list_add_tail(&seb->u.list, &si->erase);
-	}
-
-	rb_erase(&sv->rb, &si->volumes);
-	kfree(sv);
-	si->vols_found -= 1;
-}
-
-/**
- * ubi_scan_erase_peb - erase a physical eraseblock.
- * @ubi: UBI device description object
- * @si: scanning information
- * @pnum: physical eraseblock number to erase;
- * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- *
- * This function erases physical eraseblock 'pnum', and writes the erase
- * counter header to it. This function should only be used on UBI device
- * initialization stages, when the EBA unit had not been yet initialized. This
- * function returns zero in case of success and a negative error code in case
- * of failure.
- */
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
-		       int pnum, int ec)
-{
-	int err;
-	struct ubi_ec_hdr *ec_hdr;
-
-	if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
-		/*
-		 * Erase counter overflow. Upgrade UBI and use 64-bit
-		 * erase counters internally.
-		 */
-		ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
-		return -EINVAL;
-	}
-
-	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
-	if (!ec_hdr)
-		return -ENOMEM;
-
-	ec_hdr->ec = cpu_to_be64(ec);
-
-	err = ubi_io_sync_erase(ubi, pnum, 0);
-	if (err < 0)
-		goto out_free;
-
-	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
-
-out_free:
-	kfree(ec_hdr);
-	return err;
-}
-
-/**
- * ubi_scan_get_free_peb - get a free physical eraseblock.
- * @ubi: UBI device description object
- * @si: scanning information
- *
- * This function returns a free physical eraseblock. It is supposed to be
- * called on the UBI initialization stages when the wear-leveling unit is not
- * initialized yet. This function picks a physical eraseblocks from one of the
- * lists, writes the EC header if it is needed, and removes it from the list.
- *
- * This function returns scanning physical eraseblock information in case of
- * success and an error code in case of failure.
- */
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
-					   struct ubi_scan_info *si)
-{
-	int err = 0, i;
-	struct ubi_scan_leb *seb;
-
-	if (!list_empty(&si->free)) {
-		seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
-		list_del(&seb->u.list);
-		dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
-		return seb;
-	}
-
-	for (i = 0; i < 2; i++) {
-		struct list_head *head;
-		struct ubi_scan_leb *tmp_seb;
-
-		if (i == 0)
-			head = &si->erase;
-		else
-			head = &si->corr;
-
-		/*
-		 * We try to erase the first physical eraseblock from the @head
-		 * list and pick it if we succeed, or try to erase the
-		 * next one if not. And so forth. We don't want to worry
-		 * about bad eraseblocks here - they'll be handled later.
-		 */
-		list_for_each_entry_safe(seb, tmp_seb, head, u.list) {
-			if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-				seb->ec = si->mean_ec;
-
-			err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
-			if (err)
-				continue;
-
-			seb->ec += 1;
-			list_del(&seb->u.list);
-			dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
-			return seb;
-		}
-	}
-
-	ubi_err("no eraseblocks found");
-	return ERR_PTR(-ENOSPC);
-}
-
-/**
- * process_eb - read UBI headers, check them and add corresponding data
- * to the scanning information.
- * @ubi: UBI device description object
- * @si: scanning information
- * @pnum: the physical eraseblock number
- *
- * This function returns zero if the physical eraseblock was successfully
- * handled and a negative error code in case of failure.
- */
-static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
-{
-	long long uninitialized_var(ec);
-	int err, bitflips = 0, vol_id, ec_corr = 0;
-
-	dbg_bld("scan PEB %d", pnum);
-
-	/* Skip bad physical eraseblocks */
-	err = ubi_io_is_bad(ubi, pnum);
-	if (err < 0)
-		return err;
-	else if (err) {
-		/*
-		 * FIXME: it is actually the duty of the I/O unit to initialize
-		 * this, but MTD does not provide enough information.
-		 */
-		si->bad_peb_count += 1;
-		return 0;
-	}
-
-	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
-	if (err < 0)
-		return err;
-	else if (err == UBI_IO_BITFLIPS)
-		bitflips = 1;
-	else if (err == UBI_IO_PEB_EMPTY)
-		return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase);
-	else if (err == UBI_IO_BAD_EC_HDR) {
-		/*
-		 * We have to also look at the VID header, possibly it is not
-		 * corrupted. Set %bitflips flag in order to make this PEB be
-		 * moved and EC be re-created.
-		 */
-		ec_corr = 1;
-		ec = UBI_SCAN_UNKNOWN_EC;
-		bitflips = 1;
-	}
-
-	si->is_empty = 0;
-
-	if (!ec_corr) {
-		/* Make sure UBI version is OK */
-		if (ech->version != UBI_VERSION) {
-			ubi_err("this UBI version is %d, image version is %d",
-				UBI_VERSION, (int)ech->version);
-			return -EINVAL;
-		}
-
-		ec = be64_to_cpu(ech->ec);
-		if (ec > UBI_MAX_ERASECOUNTER) {
-			/*
-			 * Erase counter overflow. The EC headers have 64 bits
-			 * reserved, but we anyway make use of only 31 bit
-			 * values, as this seems to be enough for any existing
-			 * flash. Upgrade UBI and use 64-bit erase counters
-			 * internally.
-			 */
-			ubi_err("erase counter overflow, max is %d",
-				UBI_MAX_ERASECOUNTER);
-			ubi_dbg_dump_ec_hdr(ech);
-			return -EINVAL;
-		}
-	}
-
-	/* OK, we're done with the EC header, let's look at the VID header */
-
-	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
-	if (err < 0)
-		return err;
-	else if (err == UBI_IO_BITFLIPS)
-		bitflips = 1;
-	else if (err == UBI_IO_BAD_VID_HDR ||
-		 (err == UBI_IO_PEB_FREE && ec_corr)) {
-		/* VID header is corrupted */
-		err = add_to_list(si, pnum, ec, &si->corr);
-		if (err)
-			return err;
-		goto adjust_mean_ec;
-	} else if (err == UBI_IO_PEB_FREE) {
-		/* No VID header - the physical eraseblock is free */
-		err = add_to_list(si, pnum, ec, &si->free);
-		if (err)
-			return err;
-		goto adjust_mean_ec;
-	}
-
-	vol_id = be32_to_cpu(vidh->vol_id);
-	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
-		int lnum = be32_to_cpu(vidh->lnum);
-
-		/* Unsupported internal volume */
-		switch (vidh->compat) {
-		case UBI_COMPAT_DELETE:
-			ubi_msg("\"delete\" compatible internal volume %d:%d"
-				" found, remove it", vol_id, lnum);
-			err = add_to_list(si, pnum, ec, &si->corr);
-			if (err)
-				return err;
-			break;
-
-		case UBI_COMPAT_RO:
-			ubi_msg("read-only compatible internal volume %d:%d"
-				" found, switch to read-only mode",
-				vol_id, lnum);
-			ubi->ro_mode = 1;
-			break;
-
-		case UBI_COMPAT_PRESERVE:
-			ubi_msg("\"preserve\" compatible internal volume %d:%d"
-				" found", vol_id, lnum);
-			err = add_to_list(si, pnum, ec, &si->alien);
-			if (err)
-				return err;
-			si->alien_peb_count += 1;
-			return 0;
-
-		case UBI_COMPAT_REJECT:
-			ubi_err("incompatible internal volume %d:%d found",
-				vol_id, lnum);
-			return -EINVAL;
-		}
-	}
-
-	/* Both UBI headers seem to be fine */
-	err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
-	if (err)
-		return err;
-
-adjust_mean_ec:
-	if (!ec_corr) {
-		si->ec_sum += ec;
-		si->ec_count += 1;
-		if (ec > si->max_ec)
-			si->max_ec = ec;
-		if (ec < si->min_ec)
-			si->min_ec = ec;
-	}
-
-	return 0;
-}
-
-/**
- * ubi_scan - scan an MTD device.
- * @ubi: UBI device description object
- *
- * This function does full scanning of an MTD device and returns complete
- * information about it. In case of failure, an error code is returned.
- */
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
-{
-	int err, pnum;
-	struct rb_node *rb1, *rb2;
-	struct ubi_scan_volume *sv;
-	struct ubi_scan_leb *seb;
-	struct ubi_scan_info *si;
-
-	si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
-	if (!si)
-		return ERR_PTR(-ENOMEM);
-
-	INIT_LIST_HEAD(&si->corr);
-	INIT_LIST_HEAD(&si->free);
-	INIT_LIST_HEAD(&si->erase);
-	INIT_LIST_HEAD(&si->alien);
-	si->volumes = RB_ROOT;
-	si->is_empty = 1;
-
-	err = -ENOMEM;
-	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
-	if (!ech)
-		goto out_si;
-
-	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-	if (!vidh)
-		goto out_ech;
-
-	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
-		cond_resched();
-
-		dbg_msg("process PEB %d", pnum);
-		err = process_eb(ubi, si, pnum);
-		if (err < 0)
-			goto out_vidh;
-	}
-
-	dbg_msg("scanning is finished");
-
-	/* Calculate mean erase counter */
-	if (si->ec_count) {
-		do_div(si->ec_sum, si->ec_count);
-		si->mean_ec = si->ec_sum;
-	}
-
-	if (si->is_empty)
-		ubi_msg("empty MTD device detected");
-
-	/*
-	 * In case of unknown erase counter we use the mean erase counter
-	 * value.
-	 */
-	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
-			if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-				seb->ec = si->mean_ec;
-	}
-
-	list_for_each_entry(seb, &si->free, u.list) {
-		if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-			seb->ec = si->mean_ec;
-	}
-
-	list_for_each_entry(seb, &si->corr, u.list)
-		if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-			seb->ec = si->mean_ec;
-
-	list_for_each_entry(seb, &si->erase, u.list)
-		if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-			seb->ec = si->mean_ec;
-
-	err = paranoid_check_si(ubi, si);
-	if (err) {
-		if (err > 0)
-			err = -EINVAL;
-		goto out_vidh;
-	}
-
-	ubi_free_vid_hdr(ubi, vidh);
-	kfree(ech);
-
-	return si;
-
-out_vidh:
-	ubi_free_vid_hdr(ubi, vidh);
-out_ech:
-	kfree(ech);
-out_si:
-	ubi_scan_destroy_si(si);
-	return ERR_PTR(err);
-}
-
-/**
- * destroy_sv - free the scanning volume information
- * @sv: scanning volume information
- *
- * This function destroys the volume RB-tree (@sv->root) and the scanning
- * volume information.
- */
-static void destroy_sv(struct ubi_scan_volume *sv)
-{
-	struct ubi_scan_leb *seb;
-	struct rb_node *this = sv->root.rb_node;
-
-	while (this) {
-		if (this->rb_left)
-			this = this->rb_left;
-		else if (this->rb_right)
-			this = this->rb_right;
-		else {
-			seb = rb_entry(this, struct ubi_scan_leb, u.rb);
-			this = rb_parent(this);
-			if (this) {
-				if (this->rb_left == &seb->u.rb)
-					this->rb_left = NULL;
-				else
-					this->rb_right = NULL;
-			}
-
-			kfree(seb);
-		}
-	}
-	kfree(sv);
-}
-
-/**
- * ubi_scan_destroy_si - destroy scanning information.
- * @si: scanning information
- */
-void ubi_scan_destroy_si(struct ubi_scan_info *si)
-{
-	struct ubi_scan_leb *seb, *seb_tmp;
-	struct ubi_scan_volume *sv;
-	struct rb_node *rb;
-
-	list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
-		list_del(&seb->u.list);
-		kfree(seb);
-	}
-	list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
-		list_del(&seb->u.list);
-		kfree(seb);
-	}
-	list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
-		list_del(&seb->u.list);
-		kfree(seb);
-	}
-	list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
-		list_del(&seb->u.list);
-		kfree(seb);
-	}
-
-	/* Destroy the volume RB-tree */
-	rb = si->volumes.rb_node;
-	while (rb) {
-		if (rb->rb_left)
-			rb = rb->rb_left;
-		else if (rb->rb_right)
-			rb = rb->rb_right;
-		else {
-			sv = rb_entry(rb, struct ubi_scan_volume, rb);
-
-			rb = rb_parent(rb);
-			if (rb) {
-				if (rb->rb_left == &sv->rb)
-					rb->rb_left = NULL;
-				else
-					rb->rb_right = NULL;
-			}
-
-			destroy_sv(sv);
-		}
-	}
-
-	kfree(si);
-}
-
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
-/**
- * paranoid_check_si - check if the scanning information is correct and
- * consistent.
- * @ubi: UBI device description object
- * @si: scanning information
- *
- * This function returns zero if the scanning information is all right, %1 if
- * not and a negative error code if an error occurred.
- */
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
-{
-	int pnum, err, vols_found = 0;
-	struct rb_node *rb1, *rb2;
-	struct ubi_scan_volume *sv;
-	struct ubi_scan_leb *seb, *last_seb;
-	uint8_t *buf;
-
-	/*
-	 * At first, check that scanning information is OK.
-	 */
-	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-		int leb_count = 0;
-
-		cond_resched();
-
-		vols_found += 1;
-
-		if (si->is_empty) {
-			ubi_err("bad is_empty flag");
-			goto bad_sv;
-		}
-
-		if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
-		    sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
-		    sv->data_pad < 0 || sv->last_data_size < 0) {
-			ubi_err("negative values");
-			goto bad_sv;
-		}
-
-		if (sv->vol_id >= UBI_MAX_VOLUMES &&
-		    sv->vol_id < UBI_INTERNAL_VOL_START) {
-			ubi_err("bad vol_id");
-			goto bad_sv;
-		}
-
-		if (sv->vol_id > si->highest_vol_id) {
-			ubi_err("highest_vol_id is %d, but vol_id %d is there",
-				si->highest_vol_id, sv->vol_id);
-			goto out;
-		}
-
-		if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
-		    sv->vol_type != UBI_STATIC_VOLUME) {
-			ubi_err("bad vol_type");
-			goto bad_sv;
-		}
-
-		if (sv->data_pad > ubi->leb_size / 2) {
-			ubi_err("bad data_pad");
-			goto bad_sv;
-		}
-
-		last_seb = NULL;
-		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
-			cond_resched();
-
-			last_seb = seb;
-			leb_count += 1;
-
-			if (seb->pnum < 0 || seb->ec < 0) {
-				ubi_err("negative values");
-				goto bad_seb;
-			}
-
-			if (seb->ec < si->min_ec) {
-				ubi_err("bad si->min_ec (%d), %d found",
-					si->min_ec, seb->ec);
-				goto bad_seb;
-			}
-
-			if (seb->ec > si->max_ec) {
-				ubi_err("bad si->max_ec (%d), %d found",
-					si->max_ec, seb->ec);
-				goto bad_seb;
-			}
-
-			if (seb->pnum >= ubi->peb_count) {
-				ubi_err("too high PEB number %d, total PEBs %d",
-					seb->pnum, ubi->peb_count);
-				goto bad_seb;
-			}
-
-			if (sv->vol_type == UBI_STATIC_VOLUME) {
-				if (seb->lnum >= sv->used_ebs) {
-					ubi_err("bad lnum or used_ebs");
-					goto bad_seb;
-				}
-			} else {
-				if (sv->used_ebs != 0) {
-					ubi_err("non-zero used_ebs");
-					goto bad_seb;
-				}
-			}
-
-			if (seb->lnum > sv->highest_lnum) {
-				ubi_err("incorrect highest_lnum or lnum");
-				goto bad_seb;
-			}
-		}
-
-		if (sv->leb_count != leb_count) {
-			ubi_err("bad leb_count, %d objects in the tree",
-				leb_count);
-			goto bad_sv;
-		}
-
-		if (!last_seb)
-			continue;
-
-		seb = last_seb;
-
-		if (seb->lnum != sv->highest_lnum) {
-			ubi_err("bad highest_lnum");
-			goto bad_seb;
-		}
-	}
-
-	if (vols_found != si->vols_found) {
-		ubi_err("bad si->vols_found %d, should be %d",
-			si->vols_found, vols_found);
-		goto out;
-	}
-
-	/* Check that scanning information is correct */
-	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-		last_seb = NULL;
-		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
-			int vol_type;
-
-			cond_resched();
-
-			last_seb = seb;
-
-			err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
-			if (err && err != UBI_IO_BITFLIPS) {
-				ubi_err("VID header is not OK (%d)", err);
-				if (err > 0)
-					err = -EIO;
-				return err;
-			}
-
-			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
-				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
-			if (sv->vol_type != vol_type) {
-				ubi_err("bad vol_type");
-				goto bad_vid_hdr;
-			}
-
-			if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
-				ubi_err("bad sqnum %llu", seb->sqnum);
-				goto bad_vid_hdr;
-			}
-
-			if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
-				ubi_err("bad vol_id %d", sv->vol_id);
-				goto bad_vid_hdr;
-			}
-
-			if (sv->compat != vidh->compat) {
-				ubi_err("bad compat %d", vidh->compat);
-				goto bad_vid_hdr;
-			}
-
-			if (seb->lnum != be32_to_cpu(vidh->lnum)) {
-				ubi_err("bad lnum %d", seb->lnum);
-				goto bad_vid_hdr;
-			}
-
-			if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
-				ubi_err("bad used_ebs %d", sv->used_ebs);
-				goto bad_vid_hdr;
-			}
-
-			if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
-				ubi_err("bad data_pad %d", sv->data_pad);
-				goto bad_vid_hdr;
-			}
-
-			if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) {
-				ubi_err("bad leb_ver %u", seb->leb_ver);
-				goto bad_vid_hdr;
-			}
-		}
-
-		if (!last_seb)
-			continue;
-
-		if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
-			ubi_err("bad highest_lnum %d", sv->highest_lnum);
-			goto bad_vid_hdr;
-		}
-
-		if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
-			ubi_err("bad last_data_size %d", sv->last_data_size);
-			goto bad_vid_hdr;
-		}
-	}
-
-	/*
-	 * Make sure that all the physical eraseblocks are in one of the lists
-	 * or trees.
-	 */
-	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
-		err = ubi_io_is_bad(ubi, pnum);
-		if (err < 0) {
-			kfree(buf);
-			return err;
-		} else if (err)
-			buf[pnum] = 1;
-	}
-
-	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
-		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
-			buf[seb->pnum] = 1;
-
-	list_for_each_entry(seb, &si->free, u.list)
-		buf[seb->pnum] = 1;
-
-	list_for_each_entry(seb, &si->corr, u.list)
-		buf[seb->pnum] = 1;
-
-	list_for_each_entry(seb, &si->erase, u.list)
-		buf[seb->pnum] = 1;
-
-	list_for_each_entry(seb, &si->alien, u.list)
-		buf[seb->pnum] = 1;
-
-	err = 0;
-	for (pnum = 0; pnum < ubi->peb_count; pnum++)
-		if (!buf[pnum]) {
-			ubi_err("PEB %d is not referred", pnum);
-			err = 1;
-		}
-
-	kfree(buf);
-	if (err)
-		goto out;
-	return 0;
-
-bad_seb:
-	ubi_err("bad scanning information about LEB %d", seb->lnum);
-	ubi_dbg_dump_seb(seb, 0);
-	ubi_dbg_dump_sv(sv);
-	goto out;
-
-bad_sv:
-	ubi_err("bad scanning information about volume %d", sv->vol_id);
-	ubi_dbg_dump_sv(sv);
-	goto out;
-
-bad_vid_hdr:
-	ubi_err("bad scanning information about volume %d", sv->vol_id);
-	ubi_dbg_dump_sv(sv);
-	ubi_dbg_dump_vid_hdr(vidh);
-
-out:
-	ubi_dbg_dump_stack();
-	return 1;
-}
-
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
deleted file mode 100644
index 252b1f1..0000000
--- a/drivers/mtd/ubi/scan.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * SPDX-License-Identifier:	GPL-2.0+
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-#ifndef __UBI_SCAN_H__
-#define __UBI_SCAN_H__
-
-/* The erase counter value for this physical eraseblock is unknown */
-#define UBI_SCAN_UNKNOWN_EC (-1)
-
-/**
- * struct ubi_scan_leb - scanning information about a physical eraseblock.
- * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- * @pnum: physical eraseblock number
- * @lnum: logical eraseblock number
- * @scrub: if this physical eraseblock needs scrubbing
- * @sqnum: sequence number
- * @u: union of the RB-tree and @list links
- * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
- * @u.list: link in one of the eraseblock lists
- * @leb_ver: logical eraseblock version (obsolete)
- *
- * One object of this type is allocated for each physical eraseblock during
- * scanning.
- */
-struct ubi_scan_leb {
-	int ec;
-	int pnum;
-	int lnum;
-	int scrub;
-	unsigned long long sqnum;
-	union {
-		struct rb_node rb;
-		struct list_head list;
-	} u;
-	uint32_t leb_ver;
-};
-
-/**
- * struct ubi_scan_volume - scanning information about a volume.
- * @vol_id: volume ID
- * @highest_lnum: highest logical eraseblock number in this volume
- * @leb_count: number of logical eraseblocks in this volume
- * @vol_type: volume type
- * @used_ebs: number of used logical eraseblocks in this volume (only for
- * static volumes)
- * @last_data_size: amount of data in the last logical eraseblock of this
- * volume (always equivalent to the usable logical eraseblock size in case of
- * dynamic volumes)
- * @data_pad: how many bytes at the end of logical eraseblocks of this volume
- * are not used (due to volume alignment)
- * @compat: compatibility flags of this volume
- * @rb: link in the volume RB-tree
- * @root: root of the RB-tree containing all the eraseblocks belonging to this
- * volume (&struct ubi_scan_leb objects)
- *
- * One object of this type is allocated for each volume during scanning.
- */
-struct ubi_scan_volume {
-	int vol_id;
-	int highest_lnum;
-	int leb_count;
-	int vol_type;
-	int used_ebs;
-	int last_data_size;
-	int data_pad;
-	int compat;
-	struct rb_node rb;
-	struct rb_root root;
-};
-
-/**
- * struct ubi_scan_info - UBI scanning information.
- * @volumes: root of the volume RB-tree
- * @corr: list of corrupted physical eraseblocks
- * @free: list of free physical eraseblocks
- * @erase: list of physical eraseblocks which have to be erased
- * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
- * those belonging to "preserve"-compatible internal volumes)
- * @bad_peb_count: count of bad physical eraseblocks
- * @vols_found: number of volumes found during scanning
- * @highest_vol_id: highest volume ID
- * @alien_peb_count: count of physical eraseblocks in the @alien list
- * @is_empty: flag indicating whether the MTD device is empty or not
- * @min_ec: lowest erase counter value
- * @max_ec: highest erase counter value
- * @max_sqnum: highest sequence number value
- * @mean_ec: mean erase counter value
- * @ec_sum: a temporary variable used when calculating @mean_ec
- * @ec_count: a temporary variable used when calculating @mean_ec
- *
- * This data structure contains the result of scanning and may be used by other
- * UBI units to build final UBI data structures, further error-recovery and so
- * on.
- */
-struct ubi_scan_info {
-	struct rb_root volumes;
-	struct list_head corr;
-	struct list_head free;
-	struct list_head erase;
-	struct list_head alien;
-	int bad_peb_count;
-	int vols_found;
-	int highest_vol_id;
-	int alien_peb_count;
-	int is_empty;
-	int min_ec;
-	int max_ec;
-	unsigned long long max_sqnum;
-	int mean_ec;
-	uint64_t ec_sum;
-	int ec_count;
-};
-
-struct ubi_device;
-struct ubi_vid_hdr;
-
-/*
- * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a
- * list.
- *
- * @sv: volume scanning information
- * @seb: scanning eraseblock information
- * @list: the list to move to
- */
-static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
-					 struct ubi_scan_leb *seb,
-					 struct list_head *list)
-{
-	rb_erase(&seb->u.rb, &sv->root);
-	list_add_tail(&seb->u.list, list);
-}
-
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
-		      int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
-		      int bitflips);
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
-					 int vol_id);
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
-				       int lnum);
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
-					   struct ubi_scan_info *si);
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
-		       int pnum, int ec);
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
-void ubi_scan_destroy_si(struct ubi_scan_info *si);
-
-#endif /* !__UBI_SCAN_H__ */
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 9012326..2809805 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -86,10 +86,11 @@
  * Compatibility constants used by internal volumes.
  *
  * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
- * to the flash
+ *                     to the flash
  * @UBI_COMPAT_RO: attach this device in read-only mode
  * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
- * physical eraseblocks, don't allow the wear-leveling unit to move them
+ *                       physical eraseblocks, don't allow the wear-leveling
+ *                       sub-system to move them
  * @UBI_COMPAT_REJECT: reject this UBI image
  */
 enum {
@@ -111,18 +112,19 @@
  * struct ubi_ec_hdr - UBI erase counter header.
  * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
  * @version: version of UBI implementation which is supposed to accept this
- * UBI image
+ *           UBI image
  * @padding1: reserved for future, zeroes
  * @ec: the erase counter
  * @vid_hdr_offset: where the VID header starts
  * @data_offset: where the user data start
+ * @image_seq: image sequence number
  * @padding2: reserved for future, zeroes
  * @hdr_crc: erase counter header CRC checksum
  *
  * The erase counter header takes 64 bytes and has plenty of unused space for
  * future usage. The unused fields are zeroed. The @version field is used to
  * indicate the version of UBI implementation which is supposed to be able to
- * work with this UBI image. If @version is greater then the current UBI
+ * work with this UBI image. If @version is greater than the current UBI
  * version, the image is rejected. This may be useful in future if something
  * is changed radically. This field is duplicated in the volume identifier
  * header.
@@ -131,6 +133,14 @@
  * volume identifier header and user data, relative to the beginning of the
  * physical eraseblock. These values have to be the same for all physical
  * eraseblocks.
+ *
+ * The @image_seq field is used to validate a UBI image that has been prepared
+ * for a UBI device. The @image_seq value can be any value, but it must be the
+ * same on all eraseblocks. UBI will ensure that all new erase counter headers
+ * also contain this value, and will check the value when attaching the flash.
+ * One way to make use of @image_seq is to increase its value by one every time
+ * an image is flashed over an existing image, then, if the flashing does not
+ * complete, UBI will detect the error when attaching the media.
  */
 struct ubi_ec_hdr {
 	__be32  magic;
@@ -139,32 +149,32 @@
 	__be64  ec; /* Warning: the current limit is 31-bit anyway! */
 	__be32  vid_hdr_offset;
 	__be32  data_offset;
-	__u8    padding2[36];
+	__be32  image_seq;
+	__u8    padding2[32];
 	__be32  hdr_crc;
-} __attribute__ ((packed));
+} __packed;
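The @image_seq convention described above is implemented by the flashing tool,
not by this header. A hypothetical host-side sketch - struct flasher,
read_image_seq() and flash_image() are assumed helpers, not U-Boot or kernel
APIs:

	/* host-side sketch of the "bump @image_seq on every re-flash" idea */
	static int reflash(struct flasher *dev, const void *img, size_t len)
	{
		uint32_t seq = read_image_seq(dev); /* @image_seq from any EC header */

		/* stamp seq + 1 into every EC header of the new image; an
		 * interrupted flash then leaves mixed values on the media and
		 * UBI detects the error when attaching */
		return flash_image(dev, img, len, seq + 1);
	}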
 
 /**
  * struct ubi_vid_hdr - on-flash UBI volume identifier header.
  * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
  * @version: UBI implementation version which is supposed to accept this UBI
- * image (%UBI_VERSION)
+ *           image (%UBI_VERSION)
  * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
  * @copy_flag: if this logical eraseblock was copied from another physical
- * eraseblock (for wear-leveling reasons)
+ *             eraseblock (for wear-leveling reasons)
  * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
- * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
+ *          %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
  * @vol_id: ID of this volume
  * @lnum: logical eraseblock number
- * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be
- * removed, kept only for not breaking older UBI users)
+ * @padding1: reserved for future, zeroes
  * @data_size: how many bytes of data this logical eraseblock contains
  * @used_ebs: total number of used logical eraseblocks in this volume
  * @data_pad: how many bytes at the end of this physical eraseblock are not
- * used
+ *            used
  * @data_crc: CRC checksum of the data stored in this logical eraseblock
- * @padding1: reserved for future, zeroes
- * @sqnum: sequence number
  * @padding2: reserved for future, zeroes
+ * @sqnum: sequence number
+ * @padding3: reserved for future, zeroes
  * @hdr_crc: volume identifier header CRC checksum
  *
  * The @sqnum is the value of the global sequence counter at the time when this
@@ -175,7 +185,7 @@
  * (sequence number) is used to distinguish between older and newer versions of
  * logical eraseblocks.
  *
- * There are 2 situations when there may be more then one physical eraseblock
+ * There are 2 situations when there may be more than one physical eraseblock
  * corresponding to the same logical eraseblock, i.e., having the same @vol_id
  * and @lnum values in the volume identifier header. Suppose we have a logical
  * eraseblock L and it is mapped to the physical eraseblock P.
@@ -212,10 +222,6 @@
  * checksum is correct, this physical eraseblock is selected (P1). Otherwise
  * the older one (P) is selected.
  *
- * Note, there is an obsolete @leb_ver field which was used instead of @sqnum
- * in the past. But it is not used anymore and we keep it in order to be able
- * to deal with old UBI images. It will be removed at some point.
- *
  * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
  * Internal volumes are not seen from outside and are used for various internal
  * UBI purposes. In this implementation there is only one internal volume - the
@@ -236,9 +242,9 @@
  * The @data_crc field contains the CRC checksum of the contents of the logical
  * eraseblock if this is a static volume. In case of dynamic volumes, it does
  * not contain the CRC checksum as a rule. The only exception is when the
- * data of the physical eraseblock was moved by the wear-leveling unit, then
- * the wear-leveling unit calculates the data CRC and stores it in the
- * @data_crc field. And of course, the @copy_flag is %in this case.
+ * data of the physical eraseblock was moved by the wear-leveling sub-system,
+ * then the wear-leveling sub-system calculates the data CRC and stores it in
+ * the @data_crc field. And of course, the @copy_flag is %1 in this case.
  *
  * The @data_size field is used only for static volumes because UBI has to know
  * how many bytes of data are stored in this eraseblock. For dynamic volumes,
@@ -265,23 +271,23 @@
 	__u8    compat;
 	__be32  vol_id;
 	__be32  lnum;
-	__be32  leb_ver; /* obsolete, to be removed, don't use */
+	__u8    padding1[4];
 	__be32  data_size;
 	__be32  used_ebs;
 	__be32  data_pad;
 	__be32  data_crc;
-	__u8    padding1[4];
+	__u8    padding2[4];
 	__be64  sqnum;
-	__u8    padding2[12];
+	__u8    padding3[12];
 	__be32  hdr_crc;
-} __attribute__ ((packed));
+} __packed;
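Both layout changes in this hunk (EC header: four bytes of @padding2 become
@image_seq; VID header just above: @leb_ver becomes @padding1) are
size-neutral, so old and new images stay layout-compatible. As a sketch, a
compile-time check one could drop into an init function:

	/* both on-flash headers must remain exactly 64 bytes */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);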
 
 /* Internal UBI volumes count */
 #define UBI_INT_VOL_COUNT 1
 
 /*
- * Starting ID of internal volumes. There is reserved room for 4096 internal
- * volumes.
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
  */
 #define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
 
@@ -351,10 +357,151 @@
 	__u8    vol_type;
 	__u8    upd_marker;
 	__be16  name_len;
+#ifndef __UBOOT__
 	__u8    name[UBI_VOL_NAME_MAX+1];
+#else
+	char    name[UBI_VOL_NAME_MAX+1];
+#endif
 	__u8    flags;
 	__u8    padding[23];
 	__be32  crc;
-} __attribute__ ((packed));
+} __packed;
 
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID	(UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID	(UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION	1
+
+#define UBI_FM_SB_MAGIC		0x7B11D69F
+#define UBI_FM_HDR_MAGIC	0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC	0xFA370ED1
+#define UBI_FM_POOL_MAGIC	0x67AF4D08
+#define UBI_FM_EBA_MAGIC	0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START	64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS	32
+
+/* 5% of the total number of PEBs have to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE	8
+#define UBI_FM_MAX_POOL_SIZE	256
+
+#define UBI_FM_WL_POOL_SIZE	25
+
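The 5%-with-clamping rule from the comment above, as a stand-alone sketch (the
real computation lives in the attach/fastmap code, this is only an
illustration):

	static int fm_pool_size(int peb_count)
	{
		int size = peb_count / 20;	/* 5% of all PEBs */

		if (size < UBI_FM_MIN_POOL_SIZE)
			size = UBI_FM_MIN_POOL_SIZE;
		if (size > UBI_FM_MAX_POOL_SIZE)
			size = UBI_FM_MAX_POOL_SIZE;
		return size;
	}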
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was taken
+ */
+struct ubi_fm_sb {
+	__be32 magic;
+	__u8 version;
+	__u8 padding1[3];
+	__be32 data_crc;
+	__be32 used_blocks;
+	__be32 block_loc[UBI_FM_MAX_BLOCKS];
+	__be32 block_ec[UBI_FM_MAX_BLOCKS];
+	__be64 sqnum;
+	__u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of to-be-scrubbed PEBs known by this fastmap
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+	__be32 magic;
+	__be32 free_peb_count;
+	__be32 used_peb_count;
+	__be32 scrub_peb_count;
+	__be32 bad_peb_count;
+	__be32 erase_peb_count;
+	__be32 vol_count;
+	__u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+	__be32 magic;
+	__be16 size;
+	__be16 max_size;
+	__be32 pebs[UBI_FM_MAX_POOL_SIZE];
+	__be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+	__be32 pnum;
+	__be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header
+ * It identifies the start of an EBA table.
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+	__be32 magic;
+	__be32 vol_id;
+	__u8 vol_type;
+	__u8 padding1[3];
+	__be32 data_pad;
+	__be32 used_ebs;
+	__be32 last_eb_bytes;
+	__u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+	__be32 magic;
+	__be32 reserved_pebs;
+	__be32 pnum[0];
+} __packed;
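Taken together, the structure comments above describe the following raw
fastmap layout on flash (a reading aid inferred from those comments, not an
additional on-flash record):

	/*
	 * struct ubi_fm_sb          - super block, in a PEB below UBI_FM_MAX_START
	 * struct ubi_fm_hdr         - PEB and volume counts
	 * struct ubi_fm_scan_pool   - pool to be scanned while attaching
	 * struct ubi_fm_scan_pool   - pool used by the WL sub-system
	 * struct ubi_fm_ec[]        - nfree + nused erase-counter records
	 * per volume:
	 *   struct ubi_fm_volhdr    - start of this volume's EBA table
	 *   struct ubi_fm_eba       - plus @reserved_pebs __be32 PEB numbers
	 */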
 #endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index f4f7165..1c39573 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -10,7 +10,8 @@
 #ifndef __UBI_UBI_H__
 #define __UBI_UBI_H__
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -23,22 +24,18 @@
 #include <linux/fs.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/ubi.h>
+#include <linux/notifier.h>
+#include <asm/pgtable.h>
+#else
+#include <ubi_uboot.h>
 #endif
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
-#include <linux/string.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/ubi.h>
-
 #include "ubi-media.h"
-#include "scan.h"
-#include "debug.h"
+#include <mtd/ubi-user.h>
 
 /* Maximum number of supported UBI devices */
 #define UBI_MAX_DEVICES 32
@@ -52,20 +49,21 @@
 #else
 #define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
 #endif
-/* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
-				  __func__, ##__VA_ARGS__)
-/* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
-				 __func__, ##__VA_ARGS__)
 
-/* Lowest number PEBs reserved for bad PEB handling */
-#define MIN_RESEVED_PEBS 2
+/* UBI warning messages */
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n",  \
+				   __func__, ##__VA_ARGS__)
+/* UBI error messages */
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n",      \
+				 __func__, ##__VA_ARGS__)
 
 /* Background thread name pattern */
 #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
 
-/* This marker in the EBA table means that the LEB is um-mapped */
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
 #define UBI_LEB_UNMAPPED -1
 
 /*
@@ -75,37 +73,98 @@
 #define UBI_IO_RETRIES 3
 
 /*
- * Error codes returned by the I/O unit.
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles PEBs are protected from the wear-leveling
+ * worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
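In short: @pq (see struct ubi_device further down) is a ring of
UBI_PROT_QUEUE_LEN list heads, and roughly each erase cycle advances @pq_head,
releasing the entries that have waited a full revolution. A simplified sketch
of that rotation - locking omitted, wl_tree_add() is the tree-insert helper
from wl.c:

	static void serve_prot_queue_sketch(struct ubi_device *ubi)
	{
		struct ubi_wl_entry *e, *tmp;

		/* entries at the head have been protected for a full revolution */
		list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
			list_del(&e->u.list);
			wl_tree_add(e, &ubi->used);	/* back under WL control */
		}

		if (++ubi->pq_head == UBI_PROT_QUEUE_LEN)
			ubi->pq_head = 0;
	}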
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number + 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN  (3 + 2 + 1)
+
+/*
+ * Error codes returned by the I/O sub-system.
  *
- * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
- * 0xFF bytes
- * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a
- * valid erase counter header, and the rest are %0xFF bytes
- * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC)
- * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or
- * CRC)
+ * UBI_IO_FF: the read region of flash contains only 0xFFs
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
+ *                     integrity error reported by the MTD driver
+ *                     (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
+ *                         data integrity error reported by the MTD driver
+ *                         (uncorrectable ECC error in case of NAND)
  * UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ *
+ * Note, it is probably better to have bit-flip and ebadmsg as flags which can
+ * be or'ed with other error codes. But this is a big change because there
+ * are many callers, so it is not worth the risk of introducing a bug.
  */
 enum {
-	UBI_IO_PEB_EMPTY = 1,
-	UBI_IO_PEB_FREE,
-	UBI_IO_BAD_EC_HDR,
-	UBI_IO_BAD_VID_HDR,
-	UBI_IO_BITFLIPS
+	UBI_IO_FF = 1,
+	UBI_IO_FF_BITFLIPS,
+	UBI_IO_BAD_HDR,
+	UBI_IO_BAD_HDR_EBADMSG,
+	UBI_IO_BITFLIPS,
+};
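For callers migrating from the deleted codes, a rough classification sketch;
the mapping is approximate (%UBI_IO_FF covers the old %UBI_IO_PEB_EMPTY case
and, when the EC header was valid, the old %UBI_IO_PEB_FREE case):

	/* sketch: classifying a VID header read with the new return codes */
	static void classify_vid_read(struct ubi_device *ubi, int pnum,
				      struct ubi_vid_hdr *vidh)
	{
		int err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);

		if (err < 0)
			return;			/* a real I/O error */

		switch (err) {
		case 0:
		case UBI_IO_BITFLIPS:		/* usable; bit-flips mean "scrub later" */
			break;
		case UBI_IO_FF:
		case UBI_IO_FF_BITFLIPS:	/* region is all 0xFF - empty/free */
			break;
		case UBI_IO_BAD_HDR:
		case UBI_IO_BAD_HDR_EBADMSG:	/* corrupted VID header */
			break;
		}
	}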
+
+/*
+ * Return codes of the 'ubi_eba_copy_leb()' function.
+ *
+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
+ *                   PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ *                     PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ *                     PEB
+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
+ *                     PEB
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
+ *                       target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
+ */
+enum {
+	MOVE_CANCEL_RACE = 1,
+	MOVE_SOURCE_RD_ERR,
+	MOVE_TARGET_RD_ERR,
+	MOVE_TARGET_WR_ERR,
+	MOVE_TARGET_BITFLIPS,
+	MOVE_RETRY,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+	UBI_NO_FASTMAP = 1,
+	UBI_BAD_FASTMAP,
 };
 
 /**
  * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
+ * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
  * @ec: erase counter
  * @pnum: physical eraseblock number
  *
- * This data structure is used in the WL unit. Each physical eraseblock has a
- * corresponding &struct wl_entry object which may be kept in different
- * RB-trees. See WL unit for details.
+ * This data structure is used in the WL sub-system. Each physical eraseblock
+ * has a corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL sub-system for details.
  */
 struct ubi_wl_entry {
-	struct rb_node rb;
+	union {
+		struct rb_node rb;
+		struct list_head list;
+	} u;
 	int ec;
 	int pnum;
 };
@@ -119,10 +178,10 @@
  * @mutex: read/write mutex to implement read/write access serialization to
  *         the (@vol_id, @lnum) logical eraseblock
  *
- * This data structure is used in the EBA unit to implement per-LEB locking.
- * When a logical eraseblock is being locked - corresponding
+ * This data structure is used in the EBA sub-system to implement per-LEB
+ * locking. When a logical eraseblock is being locked - corresponding
  * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
- * See EBA unit for details.
+ * See EBA sub-system for details.
  */
 struct ubi_ltree_entry {
 	struct rb_node rb;
@@ -132,9 +191,65 @@
 	struct rw_semaphore mutex;
 };
 
+/**
+ * struct ubi_rename_entry - volume re-name description data structure.
+ * @new_name_len: new volume name length
+ * @new_name: new volume name
+ * @remove: if not zero, this volume should be removed, not re-named
+ * @desc: descriptor of the volume
+ * @list: links re-name entries into a list
+ *
+ * This data structure is utilized in the multiple volume re-name code. Namely,
+ * UBI first creates a list of &struct ubi_rename_entry objects from the
+ * &struct ubi_rnvol_req request object, and then utilizes this list to do all
+ * the job.
+ */
+struct ubi_rename_entry {
+	int new_name_len;
+	char new_name[UBI_VOL_NAME_MAX + 1];
+	int remove;
+	struct ubi_volume_desc *desc;
+	struct list_head list;
+};
+
 struct ubi_volume_desc;
 
 /**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, the corresponding PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+	struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+	int to_be_tortured[UBI_FM_MAX_BLOCKS];
+	int used_blocks;
+	int max_pool_size;
+	int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs.
+ * If all PEBs within the pool are used, a new fastmap is written
+ * to the flash and the pool is refilled with empty PEBs.
+ */
+struct ubi_fm_pool {
+	int pebs[UBI_FM_MAX_POOL_SIZE];
+	int used;
+	int size;
+	int max_size;
+};
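A simplified sketch of the refill behaviour described above; ubi_update_fastmap()
is the real entry point, while locking and error handling are omitted here:

	static int pool_get_peb(struct ubi_device *ubi, struct ubi_fm_pool *pool)
	{
		if (pool->used == pool->size)
			ubi_update_fastmap(ubi);	/* writes a new fastmap, refills pools */

		return pool->pebs[pool->used++];
	}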
+
+/**
  * struct ubi_volume - UBI volume description data structure.
  * @dev: device object to make use of the Linux device model
  * @cdev: character device object to create character device
@@ -160,8 +275,6 @@
  * @upd_ebs: how many eraseblocks are expected to be updated
  * @ch_lnum: LEB number which is being changing by the atomic LEB change
  *           operation
- * @ch_dtype: data persistency type which is being changing by the atomic LEB
- *            change operation
  * @upd_bytes: how many bytes are expected to be received for volume update or
  *             atomic LEB change
  * @upd_received: how many bytes were already received for volume update or
@@ -175,10 +288,7 @@
  * @upd_marker: %1 if the update marker is set for this volume
  * @updating: %1 if the volume is being updated
  * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
- *
- * @gluebi_desc: gluebi UBI volume descriptor
- * @gluebi_refcount: reference count of the gluebi MTD device
- * @gluebi_mtd: MTD device description object of the gluebi MTD device
+ * @direct_writes: %1 if direct writes are enabled for this volume
  *
  * The @corrupted field indicates that the volume's contents is corrupted.
  * Since UBI protects only static volumes, this field is not relevant to
@@ -202,16 +312,19 @@
 	int vol_type;
 	int usable_leb_size;
 	int used_ebs;
+#ifndef __UBOOT__
 	int last_eb_bytes;
+#else
+	u32 last_eb_bytes;
+#endif
 	long long used_bytes;
 	int alignment;
 	int data_pad;
 	int name_len;
-	char name[UBI_VOL_NAME_MAX+1];
+	char name[UBI_VOL_NAME_MAX + 1];
 
 	int upd_ebs;
 	int ch_lnum;
-	int ch_dtype;
 	long long upd_bytes;
 	long long upd_received;
 	void *upd_buf;
@@ -222,22 +335,11 @@
 	unsigned int upd_marker:1;
 	unsigned int updating:1;
 	unsigned int changing_leb:1;
-
-#ifdef CONFIG_MTD_UBI_GLUEBI
-	/*
-	 * Gluebi-related stuff may be compiled out.
-	 * TODO: this should not be built into UBI but should be a separate
-	 * ubimtd driver which works on top of UBI and emulates MTD devices.
-	 */
-	struct ubi_volume_desc *gluebi_desc;
-	int gluebi_refcount;
-	struct mtd_info gluebi_mtd;
-#endif
+	unsigned int direct_writes:1;
 };
 
 /**
- * struct ubi_volume_desc - descriptor of the UBI volume returned when it is
- * opened.
+ * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
  * @vol: reference to the corresponding volume description object
  * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
  */
@@ -249,6 +351,37 @@
 struct ubi_wl_entry;
 
 /**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+	unsigned int chk_gen:1;
+	unsigned int chk_io:1;
+	unsigned int disable_bgt:1;
+	unsigned int emulate_bitflips:1;
+	unsigned int emulate_io_failures:1;
+	char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+	struct dentry *dfs_dir;
+	struct dentry *dfs_chk_gen;
+	struct dentry *dfs_chk_io;
+	struct dentry *dfs_disable_bgt;
+	struct dentry *dfs_emulate_bitflips;
+	struct dentry *dfs_emulate_io_failures;
+};
+
+/**
  * struct ubi_device - UBI device description structure
  * @dev: UBI device object to use the Linux device model
  * @cdev: character device object to create character device
@@ -261,6 +394,7 @@
  *                @vol->readers, @vol->writers, @vol->exclusive,
  *                @vol->ref_count, @vol->mapping and @vol->eba_tbl.
  * @ref_count: count of references on the UBI device
+ * @image_seq: image sequence number recorded on EC headers
  *
  * @rsvd_pebs: count of reserved physical eraseblocks
  * @avail_pebs: count of available physical eraseblocks
@@ -269,12 +403,13 @@
  * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
  *
  * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
- *                     of UBI ititializetion
+ *                     of UBI initialization
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
- * @volumes_mutex: protects on-flash volume table and serializes volume
- *                 changes, like creation, deletion, update, resize
+ * @device_mutex: protects on-flash volume table and serializes volume
+ *                creation, deletion, update, re-size, re-name and set
+ *                property
  *
  * @max_ec: current highest erase counter value
  * @mean_ec: current mean erase counter value
@@ -284,20 +419,33 @@
  * @ltree: the lock tree
  * @alc_mutex: serializes "atomic LEB change" operations
  *
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ *		sub-system
+ * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ *
  * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
+ * @free_count: Contains the number of elements in @free
  * @scrub: RB-tree of physical eraseblocks which need scrubbing
- * @prot: protection trees
- * @prot.pnum: protection tree indexed by physical eraseblock numbers
- * @prot.aec: protection tree indexed by absolute erase counter value
- * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
- *           fields
+ * @pq: protection queue (contains physical eraseblocks which are temporarily
+ *      protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ *	     @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works,
+ *	     @erroneous, and @erroneous_peb_count fields
  * @move_mutex: serializes eraseblock moves
+ * @work_sem: synchronizes the WL worker with user tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
  *             physical eraseblock
- * @abs_ec: absolute erase counter
  * @move_from: physical eraseblock from where the data is being moved
  * @move_to: physical eraseblock where the data is being moved to
  * @move_to_put: if the "to" PEB was put
@@ -310,30 +458,38 @@
  * @flash_size: underlying MTD device size (in bytes)
  * @peb_count: count of physical eraseblocks on the MTD device
  * @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
  * @bad_peb_count: count of bad physical eraseblocks
  * @good_peb_count: count of good physical eraseblocks
+ * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
+ *                  used by UBI)
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
  * @min_io_size: minimal input/output unit size of the underlying MTD device
  * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
  * @ro_mode: if the UBI device is in read-only mode
  * @leb_size: logical eraseblock size
  * @leb_start: starting offset of logical eraseblocks within physical
- * eraseblocks
+ *             eraseblocks
  * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
  * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
  * @vid_hdr_offset: starting offset of the volume identifier header (might be
- * unaligned)
+ *                  unaligned)
  * @vid_hdr_aloffset: starting offset of the VID header aligned to
  * @hdrs_min_io_size
  * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
  * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
  *               not
+ * @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ *                  time (MTD write buffer size)
  * @mtd: MTD device descriptor
  *
- * @peb_buf1: a buffer of PEB size used for different purposes
- * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: proptects @peb_buf1 and @peb_buf2
- * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: proptects @dbg_peb_buf
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
+ * @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
  */
 struct ubi_device {
 	struct cdev cdev;
@@ -344,42 +500,56 @@
 	struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
 	spinlock_t volumes_lock;
 	int ref_count;
+	int image_seq;
 
 	int rsvd_pebs;
 	int avail_pebs;
 	int beb_rsvd_pebs;
 	int beb_rsvd_level;
+	int bad_peb_limit;
 
 	int autoresize_vol_id;
 	int vtbl_slots;
 	int vtbl_size;
 	struct ubi_vtbl_record *vtbl;
-	struct mutex volumes_mutex;
+	struct mutex device_mutex;
 
 	int max_ec;
-	/* TODO: mean_ec is not updated run-time, fix */
+	/* Note, mean_ec is not updated run-time - should be fixed */
 	int mean_ec;
 
-	/* EBA unit's stuff */
+	/* EBA sub-system's stuff */
 	unsigned long long global_sqnum;
 	spinlock_t ltree_lock;
 	struct rb_root ltree;
 	struct mutex alc_mutex;
 
-	/* Wear-leveling unit's stuff */
+	/* Fastmap stuff */
+	int fm_disabled;
+	struct ubi_fastmap_layout *fm;
+	struct ubi_fm_pool fm_pool;
+	struct ubi_fm_pool fm_wl_pool;
+	struct rw_semaphore fm_sem;
+	struct mutex fm_mutex;
+	void *fm_buf;
+	size_t fm_size;
+#ifndef __UBOOT__
+	struct work_struct fm_work;
+#endif
+
+	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
+	struct rb_root erroneous;
 	struct rb_root free;
+	int free_count;
 	struct rb_root scrub;
-	struct {
-		struct rb_root pnum;
-		struct rb_root aec;
-	} prot;
+	struct list_head pq[UBI_PROT_QUEUE_LEN];
+	int pq_head;
 	spinlock_t wl_lock;
 	struct mutex move_mutex;
 	struct rw_semaphore work_sem;
 	int wl_scheduled;
 	struct ubi_wl_entry **lookuptbl;
-	unsigned long long abs_ec;
 	struct ubi_wl_entry *move_from;
 	struct ubi_wl_entry *move_to;
 	int move_to_put;
@@ -389,12 +559,15 @@
 	int thread_enabled;
 	char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
 
-	/* I/O unit's stuff */
+	/* I/O sub-system's stuff */
 	long long flash_size;
 	int peb_count;
 	int peb_size;
 	int bad_peb_count;
 	int good_peb_count;
+	int corr_peb_count;
+	int erroneous_peb_count;
+	int max_erroneous;
 	int min_io_size;
 	int hdrs_min_io_size;
 	int ro_mode;
@@ -405,35 +578,195 @@
 	int vid_hdr_offset;
 	int vid_hdr_aloffset;
 	int vid_hdr_shift;
-	int bad_allowed;
+	unsigned int bad_allowed:1;
+	unsigned int nor_flash:1;
+	int max_write_size;
 	struct mtd_info *mtd;
 
-	void *peb_buf1;
-	void *peb_buf2;
+	void *peb_buf;
 	struct mutex buf_mutex;
 	struct mutex ckvol_mutex;
-#ifdef CONFIG_MTD_UBI_DEBUG
-	void *dbg_peb_buf;
-	struct mutex dbg_buf_mutex;
-#endif
+
+	struct ubi_debug_info dbg;
 };
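
The @pq/@pq_head pair above replaces the old @prot RB-trees with a plain
rotating queue. A minimal sketch of how a PEB enters it, closely following
prot_queue_add() in wl.c (shown for illustration only, not part of this
patch):

	static void prot_queue_add(struct ubi_device *ubi,
				   struct ubi_wl_entry *e)
	{
		int pq_tail = ubi->pq_head - 1;

		/* Enqueue one slot "behind" the head, so the PEB stays
		 * protected for UBI_PROT_QUEUE_LEN wear-leveling cycles
		 * before the advancing head reaches it again. */
		if (pq_tail < 0)
			pq_tail = UBI_PROT_QUEUE_LEN - 1;
		list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	}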
 
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: union of RB-tree and @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+	int ec;
+	int pnum;
+	int vol_id;
+	int lnum;
+	unsigned int scrub:1;
+	unsigned int copy_flag:1;
+	unsigned long long sqnum;
+	union {
+		struct rb_node rb;
+		struct list_head list;
+	} u;
+};
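
For illustration (all values invented): a free PEB found while attaching,
carrying no LEB, would be described as:

	struct ubi_ainf_peb free_peb = {
		.ec     = 42,           /* from the PEB's EC header   */
		.pnum   = 17,           /* position on the MTD device */
		.vol_id = UBI_UNKNOWN,  /* no volume ...              */
		.lnum   = UBI_UNKNOWN,  /* ... and hence no LEB       */
	};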
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ *            static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ *                  volume (always equivalent to the usable logical eraseblock
+ *                  size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ *            are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ *        volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+	int vol_id;
+	int highest_lnum;
+	int leb_count;
+	int vol_type;
+	int used_ebs;
+	int last_data_size;
+	int data_pad;
+	int compat;
+	struct rb_node rb;
+	struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ *         those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ *                   0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ *                       as bad yet, but which look bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, for
+ * further error recovery and so on.
+ */
+struct ubi_attach_info {
+	struct rb_root volumes;
+	struct list_head corr;
+	struct list_head free;
+	struct list_head erase;
+	struct list_head alien;
+	int corr_peb_count;
+	int empty_peb_count;
+	int alien_peb_count;
+	int bad_peb_count;
+	int maybe_bad_peb_count;
+	int vols_found;
+	int highest_vol_id;
+	int is_empty;
+	int min_ec;
+	int max_ec;
+	unsigned long long max_sqnum;
+	int mean_ec;
+	uint64_t ec_sum;
+	int ec_count;
+	struct kmem_cache *aeb_slab_cache;
+};
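
@ec_sum and @ec_count exist only so that @mean_ec can be derived once
attaching is complete; a sketch of that last step (hypothetical helper,
PEBs with unknown erase counters are assumed to have been skipped while
accumulating):

	static void finalize_mean_ec(struct ubi_attach_info *ai)
	{
		if (ai->ec_count)
			ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	}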
+
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @cancel argument is
+ * not zero, the worker has to free the resources and exit immediately. The
+ * worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+	struct list_head list;
+	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+	/* The below fields are only relevant to erasure works */
+	struct ubi_wl_entry *e;
+	int vol_id;
+	int lnum;
+	int torture;
+	int anchor;
+};
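
A sketch of a worker matching the @func prototype above (illustrative
only): the two hard rules are to honour @cancel and to return zero or a
negative error code.

	static int erase_worker_sketch(struct ubi_device *ubi,
				       struct ubi_work *wrk, int cancel)
	{
		struct ubi_wl_entry *e = wrk->e;

		if (cancel) {
			/* Tear-down path: free resources, exit at once. */
			kmem_cache_free(ubi_wl_entry_slab, e);
			kfree(wrk);
			return 0;
		}

		/* ... erase e->pnum, torture it if wrk->torture ... */
		kfree(wrk);
		return 0;
	}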
+
+#include "debug.h"
+
 extern struct kmem_cache *ubi_wl_entry_slab;
-extern struct file_operations ubi_ctrl_cdev_operations;
-extern struct file_operations ubi_cdev_operations;
-extern struct file_operations ubi_vol_cdev_operations;
+extern const struct file_operations ubi_ctrl_cdev_operations;
+extern const struct file_operations ubi_cdev_operations;
+extern const struct file_operations ubi_vol_cdev_operations;
 extern struct class *ubi_class;
 extern struct mutex ubi_devices_mutex;
+extern struct blocking_notifier_head ubi_notifiers;
+
+/* attach.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+				    int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+				       struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
 
 /* vtbl.c */
 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
 			   struct ubi_vtbl_record *vtbl_rec);
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+			    struct list_head *rename_list);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
 
 /* vmt.c */
 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
-int ubi_remove_volume(struct ubi_volume_desc *desc);
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl);
 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
 int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
 void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
 
@@ -448,9 +781,12 @@
 			     const void __user *buf, int count);
 
 /* misc.c */
-int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+		      int length);
 int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
 void ubi_calculate_reserved(struct ubi_device *ubi);
+int ubi_check_pattern(const void *buf, uint8_t patt, int size);
 
 /* gluebi.c */
 #ifdef CONFIG_MTD_UBI_GLUEBI
@@ -474,25 +810,33 @@
 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 		     void *buf, int offset, int len, int check);
 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
-		      const void *buf, int offset, int len, int dtype);
+		      const void *buf, int offset, int len);
 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
-			 int lnum, const void *buf, int len, int dtype,
-			 int used_ebs);
+			 int lnum, const void *buf, int len, int used_ebs);
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
-			      int lnum, const void *buf, int len, int dtype);
+			      int lnum, const void *buf, int len);
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		     struct ubi_vid_hdr *vid_hdr);
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
-void ubi_eba_close(const struct ubi_device *ubi);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+		   struct ubi_attach_info *ai_scan);
 
 /* wl.c */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
-int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+		   int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
 void ubi_wl_close(struct ubi_device *ubi);
 int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+		      int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
 
 /* io.c */
 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -512,16 +856,37 @@
 			 struct ubi_vid_hdr *vid_hdr);
 
 /* build.c */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+		       int vid_hdr_offset, int max_beb_per1024);
 int ubi_detach_mtd_dev(int ubi_num, int anyway);
 struct ubi_device *ubi_get_device(int ubi_num);
 void ubi_put_device(struct ubi_device *ubi);
 struct ubi_device *ubi_get_by_major(int major);
 int ubi_major2num(int major);
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
+		      int ntype);
+int ubi_notify_all(struct ubi_device *ubi, int ntype,
+		   struct notifier_block *nb);
+int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
+
+/* kapi.c */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+			    struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+		      int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		     int fm_anchor);
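
How these entry points relate during attach, sketched under the assumption
that a caller consults @fm_disabled first (the authoritative flow lives in
attach.c; attach_sketch() is invented for illustration):

	static int attach_sketch(struct ubi_device *ubi,
				 struct ubi_attach_info *ai, int fm_anchor)
	{
		int err = -1;

		if (!ubi->fm_disabled && fm_anchor >= 0)
			err = ubi_scan_fastmap(ubi, ai, fm_anchor);
		if (err)
			err = ubi_attach(ubi, 1); /* full scan fallback */
		return err;
	}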
 
 /*
  * ubi_rb_for_each_entry - walk an RB-tree.
- * @rb: a pointer to type 'struct rb_node' to to use as a loop counter
+ * @rb: a pointer to type 'struct rb_node' to use as a loop counter
  * @pos: a pointer to RB-tree entry type to use as a loop counter
  * @root: RB-tree's root
  * @member: the name of the 'struct rb_node' within the RB-tree entry
@@ -530,7 +895,23 @@
 	for (rb = rb_first(root),                                            \
 	     pos = (rb ? container_of(rb, typeof(*pos), member) : NULL);     \
 	     rb;                                                             \
-	     rb = rb_next(rb), pos = container_of(rb, typeof(*pos), member))
+	     rb = rb_next(rb),                                               \
+	     pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
+
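
The NULL check added at the loop increment matters: after the last node
rb_next() returns NULL, and container_of(NULL, ...) would otherwise leave
@pos as a bogus non-NULL pointer for anyone inspecting it after the loop.
Usage sketch (hypothetical caller):

	static void dump_volume_pebs(struct ubi_ainf_volume *av)
	{
		struct rb_node *rb;
		struct ubi_ainf_peb *aeb;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			dbg_gen("PEB %d holds LEB %d", aeb->pnum, aeb->lnum);
	}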
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+					 struct ubi_ainf_peb *aeb,
+					 struct list_head *list)
+{
+	rb_erase(&aeb->u.rb, &av->root);
+	list_add_tail(&aeb->u.list, list);
+}
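
The move is cheap because @aeb->u is a union: the node leaves @av->root and
the same bytes become the list link, e.g. (sketch) when demoting a PEB for
erasure:

	ubi_move_aeb_to_list(av, aeb, &ai->erase);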
 
 /**
  * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
@@ -606,6 +987,7 @@
 	if (!ubi->ro_mode) {
 		ubi->ro_mode = 1;
 		ubi_warn("switch to read-only mode");
+		dump_stack();
 	}
 }
 
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index e597f82..220c120 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -26,13 +26,16 @@
  * transaction with a roll-back capability.
  */
 
-#ifdef UBI_LINUX
-#include <linux/err.h>
-#include <asm/uaccess.h>
-#include <asm/div64.h>
-#endif
-
+#define __UBOOT__
+#ifndef __UBOOT__
+#include <linux/uaccess.h>
+#else
+#include <div64.h>
 #include <ubi_uboot.h>
+#endif
+#include <linux/err.h>
+#include <linux/math64.h>
+
 #include "ubi.h"
 
 /**
@@ -48,22 +51,21 @@
 	int err;
 	struct ubi_vtbl_record vtbl_rec;
 
-	dbg_msg("set update marker for volume %d", vol->vol_id);
+	dbg_gen("set update marker for volume %d", vol->vol_id);
 
 	if (vol->upd_marker) {
 		ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
-		dbg_msg("already set");
+		dbg_gen("already set");
 		return 0;
 	}
 
-	memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
-	       sizeof(struct ubi_vtbl_record));
+	vtbl_rec = ubi->vtbl[vol->vol_id];
 	vtbl_rec.upd_marker = 1;
 
-	mutex_lock(&ubi->volumes_mutex);
+	mutex_lock(&ubi->device_mutex);
 	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
-	mutex_unlock(&ubi->volumes_mutex);
 	vol->upd_marker = 1;
+	mutex_unlock(&ubi->device_mutex);
 	return err;
 }
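
set_update_marker() and clear_update_marker() below bracket a volume update
like a transaction. A condensed sketch of the ordering (helper invented for
illustration, data-feeding step elided, error handling trimmed):

	static int update_transaction_sketch(struct ubi_device *ubi,
					     struct ubi_volume *vol,
					     long long bytes)
	{
		int err;

		err = set_update_marker(ubi, vol);  /* 1: persist intent */
		if (err)
			return err;

		/* 2: stream in @bytes via ubi_more_update_data() */

		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); /* 3: settle */
		if (err)
			return err;

		/* 4: commit - a power cut before this point leaves the
		 * marker set, so the volume reads as corrupted rather
		 * than silently half-updated. */
		return clear_update_marker(ubi, vol, bytes);
	}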
 
@@ -81,31 +83,29 @@
 			       long long bytes)
 {
 	int err;
-	uint64_t tmp;
 	struct ubi_vtbl_record vtbl_rec;
 
-	dbg_msg("clear update marker for volume %d", vol->vol_id);
+	dbg_gen("clear update marker for volume %d", vol->vol_id);
 
-	memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
-	       sizeof(struct ubi_vtbl_record));
+	vtbl_rec = ubi->vtbl[vol->vol_id];
 	ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
 	vtbl_rec.upd_marker = 0;
 
 	if (vol->vol_type == UBI_STATIC_VOLUME) {
 		vol->corrupted = 0;
-		vol->used_bytes = tmp = bytes;
-		vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size);
-		vol->used_ebs = tmp;
+		vol->used_bytes = bytes;
+		vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
+					    &vol->last_eb_bytes);
 		if (vol->last_eb_bytes)
 			vol->used_ebs += 1;
 		else
 			vol->last_eb_bytes = vol->usable_leb_size;
 	}
 
-	mutex_lock(&ubi->volumes_mutex);
+	mutex_lock(&ubi->device_mutex);
 	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
-	mutex_unlock(&ubi->volumes_mutex);
 	vol->upd_marker = 0;
+	mutex_unlock(&ubi->device_mutex);
 	return err;
 }
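
Worked example (numbers invented): with bytes = 300000 and a usable LEB
size of 126976, div_u64_rem() returns 2 and stores 46048 in
@last_eb_bytes, so @used_ebs ends up as 3; with a remainder of zero,
@used_ebs stays at the quotient and @last_eb_bytes is bumped to the full
LEB size instead.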
 
@@ -123,9 +123,8 @@
 		     long long bytes)
 {
 	int i, err;
-	uint64_t tmp;
 
-	dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+	dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
 	ubi_assert(!vol->updating && !vol->changing_leb);
 	vol->updating = 1;
 
@@ -141,21 +140,23 @@
 	}
 
 	if (bytes == 0) {
+		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+		if (err)
+			return err;
+
 		err = clear_update_marker(ubi, vol, 0);
 		if (err)
 			return err;
-		err = ubi_wl_flush(ubi);
-		if (!err)
-			vol->updating = 0;
+		vol->updating = 0;
+		return 0;
 	}
 
 	vol->upd_buf = vmalloc(ubi->leb_size);
 	if (!vol->upd_buf)
 		return -ENOMEM;
 
-	tmp = bytes;
-	vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size);
-	vol->upd_ebs += tmp;
+	vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+			       vol->usable_leb_size);
 	vol->upd_bytes = bytes;
 	vol->upd_received = 0;
 	return 0;
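
The div_u64() expression is the usual round-up idiom and is exactly
equivalent to the old do_div() sequence it replaces: with an assumed
usable LEB size of 126976, 253952 bytes give upd_ebs = 2 while 253953
bytes tip it to 3.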
@@ -175,17 +176,15 @@
 {
 	ubi_assert(!vol->updating && !vol->changing_leb);
 
-	dbg_msg("start changing LEB %d:%d, %u bytes",
+	dbg_gen("start changing LEB %d:%d, %u bytes",
 		vol->vol_id, req->lnum, req->bytes);
 	if (req->bytes == 0)
-		return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
-						 req->dtype);
+		return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
 
 	vol->upd_bytes = req->bytes;
 	vol->upd_received = 0;
 	vol->changing_leb = 1;
 	vol->ch_lnum = req->lnum;
-	vol->ch_dtype = req->dtype;
 
 	vol->upd_buf = vmalloc(req->bytes);
 	if (!vol->upd_buf)
@@ -234,11 +233,11 @@
 		memset(buf + len, 0xFF, l - len);
 		len = ubi_calc_data_len(ubi, buf, l);
 		if (len == 0) {
-			dbg_msg("all %d bytes contain 0xFF - skip", len);
+			dbg_gen("all %d bytes contain 0xFF - skip", len);
 			return 0;
 		}
 
-		err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
+		err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
 	} else {
 		/*
 		 * When writing static volume, and this is the last logical
@@ -250,8 +249,7 @@
 		 * contain zeros, not random trash.
 		 */
 		memset(buf + len, 0, vol->usable_leb_size - len);
-		err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
-					   UBI_UNKNOWN, used_ebs);
+		err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
 	}
 
 	return err;
@@ -259,6 +257,7 @@
 
 /**
  * ubi_more_update_data - write more update data.
+ * @ubi: UBI device description object
  * @vol: volume description object
  * @buf: write data (user-space memory buffer)
  * @count: how many bytes to write
@@ -272,19 +271,20 @@
 int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
 			 const void __user *buf, int count)
 {
-	uint64_t tmp;
+#ifndef __UBOOT__
 	int lnum, offs, err = 0, len, to_write = count;
+#else
+	int lnum, err = 0, len, to_write = count;
+	u32 offs;
+#endif
 
-	dbg_msg("write %d of %lld bytes, %lld already passed",
+	dbg_gen("write %d of %lld bytes, %lld already passed",
 		count, vol->upd_bytes, vol->upd_received);
 
 	if (ubi->ro_mode)
 		return -EROFS;
 
-	tmp = vol->upd_received;
-	offs = do_div(tmp, vol->usable_leb_size);
-	lnum = tmp;
-
+	lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
 	if (vol->upd_received + count > vol->upd_bytes)
 		to_write = count = vol->upd_bytes - vol->upd_received;
 
@@ -359,16 +359,16 @@
 
 	ubi_assert(vol->upd_received <= vol->upd_bytes);
 	if (vol->upd_received == vol->upd_bytes) {
+		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+		if (err)
+			return err;
 		/* The update is finished, clear the update marker */
 		err = clear_update_marker(ubi, vol, vol->upd_bytes);
 		if (err)
 			return err;
-		err = ubi_wl_flush(ubi);
-		if (err == 0) {
-			vol->updating = 0;
-			err = to_write;
-			vfree(vol->upd_buf);
-		}
+		vol->updating = 0;
+		err = to_write;
+		vfree(vol->upd_buf);
 	}
 
 	return err;
@@ -376,6 +376,7 @@
 
 /**
  * ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @ubi: UBI device description object
  * @vol: volume description object
  * @buf: write data (user-space memory buffer)
  * @count: how many bytes to write
@@ -392,7 +393,7 @@
 {
 	int err;
 
-	dbg_msg("write %d of %lld bytes, %lld already passed",
+	dbg_gen("write %d of %lld bytes, %lld already passed",
 		count, vol->upd_bytes, vol->upd_received);
 
 	if (ubi->ro_mode)
@@ -410,10 +411,11 @@
 	if (vol->upd_received == vol->upd_bytes) {
 		int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
 
-		memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
+		memset(vol->upd_buf + vol->upd_bytes, 0xFF,
+		       len - vol->upd_bytes);
 		len = ubi_calc_data_len(ubi, vol->upd_buf, len);
 		err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
-						vol->upd_buf, len, UBI_UNKNOWN);
+						vol->upd_buf, len);
 		if (err)
 			return err;
 	}
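
For instance (illustrative numbers): with upd_bytes = 5000 and a
min_io_size of 2048, ALIGN() yields len = 6144 and bytes 5000..6143 are
padded with 0xFF; ubi_calc_data_len() then trims trailing 0xFF-only I/O
units back off, so only data that differs from erased flash is written.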
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index c4e894b..d9665a4 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -11,21 +11,22 @@
  * resizing.
  */
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/err.h>
-#include <asm/div64.h>
-#endif
-
+#include <linux/slab.h>
+#include <linux/export.h>
+#else
+#include <div64.h>
 #include <ubi_uboot.h>
+#endif
+#include <linux/math64.h>
+
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static void paranoid_check_volumes(struct ubi_device *ubi);
-#else
-#define paranoid_check_volumes(ubi)
-#endif
+static int self_check_volumes(struct ubi_device *ubi);
 
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 static ssize_t vol_attribute_show(struct device *dev,
 				  struct device_attribute *attr, char *buf);
 
@@ -121,10 +122,11 @@
 {
 	struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
 
+	kfree(vol->eba_tbl);
 	kfree(vol);
 }
 
-#ifdef UBI_LINUX
+#ifndef __UBOOT__
 /**
  * volume_sysfs_init - initialize sysfs for new volume.
  * @ubi: UBI device description object
@@ -193,14 +195,13 @@
  * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
  * and saves it in @req->vol_id. Returns zero in case of success and a negative
  * error code in case of failure. Note, the caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
  */
 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 {
-	int i, err, vol_id = req->vol_id, dont_free = 0;
+	int i, err, vol_id = req->vol_id, do_free = 1;
 	struct ubi_volume *vol;
 	struct ubi_vtbl_record vtbl_rec;
-	uint64_t bytes;
 	dev_t dev;
 
 	if (ubi->ro_mode)
@@ -213,7 +214,7 @@
 	spin_lock(&ubi->volumes_lock);
 	if (vol_id == UBI_VOL_NUM_AUTO) {
 		/* Find unused volume ID */
-		dbg_msg("search for vacant volume ID");
+		dbg_gen("search for vacant volume ID");
 		for (i = 0; i < ubi->vtbl_slots; i++)
 			if (!ubi->volumes[i]) {
 				vol_id = i;
@@ -221,21 +222,21 @@
 			}
 
 		if (vol_id == UBI_VOL_NUM_AUTO) {
-			dbg_err("out of volume IDs");
+			ubi_err("out of volume IDs");
 			err = -ENFILE;
 			goto out_unlock;
 		}
 		req->vol_id = vol_id;
 	}
 
-	dbg_msg("volume ID %d, %llu bytes, type %d, name %s",
-		vol_id, (unsigned long long)req->bytes,
+	dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
+		ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
 		(int)req->vol_type, req->name);
 
 	/* Ensure that this volume does not exist */
 	err = -EEXIST;
 	if (ubi->volumes[vol_id]) {
-		dbg_err("volume %d already exists", vol_id);
+		ubi_err("volume %d already exists", vol_id);
 		goto out_unlock;
 	}
 
@@ -244,20 +245,21 @@
 		if (ubi->volumes[i] &&
 		    ubi->volumes[i]->name_len == req->name_len &&
 		    !strcmp(ubi->volumes[i]->name, req->name)) {
-			dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+			ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
 			goto out_unlock;
 		}
 
 	/* Calculate how many eraseblocks are requested */
 	vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
-	bytes = req->bytes;
-	if (do_div(bytes, vol->usable_leb_size))
-		vol->reserved_pebs = 1;
-	vol->reserved_pebs += bytes;
+	vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
+				      vol->usable_leb_size);
 
 	/* Reserve physical eraseblocks */
 	if (vol->reserved_pebs > ubi->avail_pebs) {
-		dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+		ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
+		if (ubi->corr_peb_count)
+			ubi_err("%d PEBs are corrupted and not used",
+				ubi->corr_peb_count);
 		err = -ENOSPC;
 		goto out_unlock;
 	}
@@ -270,14 +272,14 @@
 	vol->data_pad  = ubi->leb_size % vol->alignment;
 	vol->vol_type  = req->vol_type;
 	vol->name_len  = req->name_len;
-	memcpy(vol->name, req->name, vol->name_len + 1);
+	memcpy(vol->name, req->name, vol->name_len);
 	vol->ubi = ubi;
 
 	/*
 	 * Finish all pending erases because there may be some LEBs belonging
 	 * to the same volume ID.
 	 */
-	err = ubi_wl_flush(ubi);
+	err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
 	if (err)
 		goto out_acc;
 
@@ -296,10 +298,10 @@
 		vol->used_bytes =
 			(long long)vol->used_ebs * vol->usable_leb_size;
 	} else {
-		bytes = vol->used_bytes;
-		vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
-		vol->used_ebs = bytes;
-		if (vol->last_eb_bytes)
+		vol->used_ebs = div_u64_rem(vol->used_bytes,
+					    vol->usable_leb_size,
+					    &vol->last_eb_bytes);
+		if (vol->last_eb_bytes != 0)
 			vol->used_ebs += 1;
 		else
 			vol->last_eb_bytes = vol->usable_leb_size;
@@ -315,20 +317,16 @@
 		goto out_mapping;
 	}
 
-	err = ubi_create_gluebi(ubi, vol);
-	if (err)
-		goto out_cdev;
-
 	vol->dev.release = vol_release;
 	vol->dev.parent = &ubi->dev;
 	vol->dev.devt = dev;
 	vol->dev.class = ubi_class;
 
-	sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
 	err = device_register(&vol->dev);
 	if (err) {
 		ubi_err("cannot register device");
-		goto out_gluebi;
+		goto out_cdev;
 	}
 
 	err = volume_sysfs_init(ubi, vol);
@@ -345,7 +343,7 @@
 		vtbl_rec.vol_type = UBI_VID_DYNAMIC;
 	else
 		vtbl_rec.vol_type = UBI_VID_STATIC;
-	memcpy(vtbl_rec.name, vol->name, vol->name_len + 1);
+	memcpy(vtbl_rec.name, vol->name, vol->name_len);
 
 	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
 	if (err)
@@ -356,39 +354,37 @@
 	ubi->vol_count += 1;
 	spin_unlock(&ubi->volumes_lock);
 
-	paranoid_check_volumes(ubi);
-	return 0;
+	ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+	self_check_volumes(ubi);
+	return err;
 
 out_sysfs:
 	/*
-	 * We have registered our device, we should not free the volume*
+	 * We have registered our device, we should not free the volume
 	 * description object in this function in case of an error - it is
 	 * freed by the release function.
 	 *
 	 * Get device reference to prevent the release function from being
 	 * called just after sysfs has been closed.
 	 */
-	dont_free = 1;
+	do_free = 0;
 	get_device(&vol->dev);
 	volume_sysfs_close(vol);
-out_gluebi:
-	if (ubi_destroy_gluebi(vol))
-		dbg_err("cannot destroy gluebi for volume %d:%d",
-			ubi->ubi_num, vol_id);
 out_cdev:
 	cdev_del(&vol->cdev);
 out_mapping:
-	kfree(vol->eba_tbl);
+	if (do_free)
+		kfree(vol->eba_tbl);
 out_acc:
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= vol->reserved_pebs;
 	ubi->avail_pebs += vol->reserved_pebs;
 out_unlock:
 	spin_unlock(&ubi->volumes_lock);
-	if (dont_free)
-		put_device(&vol->dev);
-	else
+	if (do_free)
 		kfree(vol);
+	else
+		put_device(&vol->dev);
 	ubi_err("cannot create volume %d, error %d", vol_id, err);
 	return err;
 }
@@ -396,19 +392,20 @@
 /**
  * ubi_remove_volume - remove volume.
  * @desc: volume descriptor
+ * @no_vtbl: do not change volume table if non-zero
  *
  * This function removes volume described by @desc. The volume has to be opened
  * in "exclusive" mode. Returns zero in case of success and a negative error
- * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * code in case of failure. The caller has to have the @ubi->device_mutex
  * locked.
  */
-int ubi_remove_volume(struct ubi_volume_desc *desc)
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
 {
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 	int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
 
-	dbg_msg("remove UBI volume %d", vol_id);
+	dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
 	ubi_assert(desc->mode == UBI_EXCLUSIVE);
 	ubi_assert(vol == ubi->volumes[vol_id]);
 
@@ -427,13 +424,11 @@
 	ubi->volumes[vol_id] = NULL;
 	spin_unlock(&ubi->volumes_lock);
 
-	err = ubi_destroy_gluebi(vol);
-	if (err)
-		goto out_err;
-
-	err = ubi_change_vtbl_record(ubi, vol_id, NULL);
-	if (err)
-		goto out_err;
+	if (!no_vtbl) {
+		err = ubi_change_vtbl_record(ubi, vol_id, NULL);
+		if (err)
+			goto out_err;
+	}
 
 	for (i = 0; i < vol->reserved_pebs; i++) {
 		err = ubi_eba_unmap_leb(ubi, vol, i);
@@ -441,28 +436,21 @@
 			goto out_err;
 	}
 
-	kfree(vol->eba_tbl);
-	vol->eba_tbl = NULL;
 	cdev_del(&vol->cdev);
 	volume_sysfs_close(vol);
 
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= reserved_pebs;
 	ubi->avail_pebs += reserved_pebs;
-	i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
-	if (i > 0) {
-		i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
-		ubi->avail_pebs -= i;
-		ubi->rsvd_pebs += i;
-		ubi->beb_rsvd_pebs += i;
-		if (i > 0)
-			ubi_msg("reserve more %d PEBs", i);
-	}
+	ubi_update_reserved(ubi);
 	ubi->vol_count -= 1;
 	spin_unlock(&ubi->volumes_lock);
 
-	paranoid_check_volumes(ubi);
-	return 0;
+	ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
+	if (!no_vtbl)
+		self_check_volumes(ubi);
+
+	return err;
 
 out_err:
 	ubi_err("cannot remove volume %d, error %d", vol_id, err);
@@ -480,7 +468,7 @@
  *
  * This function re-sizes the volume and returns zero in case of success, and a
  * negative error code in case of failure. The caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
  */
 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 {
@@ -493,12 +481,12 @@
 	if (ubi->ro_mode)
 		return -EROFS;
 
-	dbg_msg("re-size volume %d to from %d to %d PEBs",
-		vol_id, vol->reserved_pebs, reserved_pebs);
+	dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
+		ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
 
 	if (vol->vol_type == UBI_STATIC_VOLUME &&
 	    reserved_pebs < vol->used_ebs) {
-		dbg_err("too small size %d, %d LEBs contain data",
+		ubi_err("too small size %d, %d LEBs contain data",
 			reserved_pebs, vol->used_ebs);
 		return -EINVAL;
 	}
@@ -527,8 +515,11 @@
 	if (pebs > 0) {
 		spin_lock(&ubi->volumes_lock);
 		if (pebs > ubi->avail_pebs) {
-			dbg_err("not enough PEBs: requested %d, available %d",
+			ubi_err("not enough PEBs: requested %d, available %d",
 				pebs, ubi->avail_pebs);
+			if (ubi->corr_peb_count)
+				ubi_err("%d PEBs are corrupted and not used",
+					ubi->corr_peb_count);
 			spin_unlock(&ubi->volumes_lock);
 			err = -ENOSPC;
 			goto out_free;
@@ -543,7 +534,7 @@
 	}
 
 	/* Change volume table record */
-	memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+	vtbl_rec = ubi->vtbl[vol_id];
 	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
 	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
 	if (err)
@@ -558,15 +549,7 @@
 		spin_lock(&ubi->volumes_lock);
 		ubi->rsvd_pebs += pebs;
 		ubi->avail_pebs -= pebs;
-		pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
-		if (pebs > 0) {
-			pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
-			ubi->avail_pebs -= pebs;
-			ubi->rsvd_pebs += pebs;
-			ubi->beb_rsvd_pebs += pebs;
-			if (pebs > 0)
-				ubi_msg("reserve more %d PEBs", pebs);
-		}
+		ubi_update_reserved(ubi);
 		for (i = 0; i < reserved_pebs; i++)
 			new_mapping[i] = vol->eba_tbl[i];
 		kfree(vol->eba_tbl);
@@ -582,8 +565,9 @@
 			(long long)vol->used_ebs * vol->usable_leb_size;
 	}
 
-	paranoid_check_volumes(ubi);
-	return 0;
+	ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
+	self_check_volumes(ubi);
+	return err;
 
 out_acc:
 	if (pebs > 0) {
@@ -598,6 +582,45 @@
 }
 
 /**
+ * ubi_rename_volumes - re-name UBI volumes.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names or removes volumes specified in the re-name list.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
+{
+	int err;
+	struct ubi_rename_entry *re;
+
+	err = ubi_vtbl_rename_volumes(ubi, rename_list);
+	if (err)
+		return err;
+
+	list_for_each_entry(re, rename_list, list) {
+		if (re->remove) {
+			err = ubi_remove_volume(re->desc, 1);
+			if (err)
+				break;
+		} else {
+			struct ubi_volume *vol = re->desc->vol;
+
+			spin_lock(&ubi->volumes_lock);
+			vol->name_len = re->new_name_len;
+			memcpy(vol->name, re->new_name, re->new_name_len + 1);
+			spin_unlock(&ubi->volumes_lock);
+			ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
+		}
+	}
+
+	if (!err)
+		self_check_volumes(ubi);
+	return err;
+}
+
+/**
  * ubi_add_volume - add volume.
  * @ubi: UBI device description object
  * @vol: volume description object
@@ -611,8 +634,7 @@
 	int err, vol_id = vol->vol_id;
 	dev_t dev;
 
-	dbg_msg("add volume %d", vol_id);
-	ubi_dbg_dump_vol_info(vol);
+	dbg_gen("add volume %d", vol_id);
 
 	/* Register character device for the volume */
 	cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
@@ -625,32 +647,25 @@
 		return err;
 	}
 
-	err = ubi_create_gluebi(ubi, vol);
-	if (err)
-		goto out_cdev;
-
 	vol->dev.release = vol_release;
 	vol->dev.parent = &ubi->dev;
 	vol->dev.devt = dev;
 	vol->dev.class = ubi_class;
-	sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
 	err = device_register(&vol->dev);
 	if (err)
-		goto out_gluebi;
+		goto out_cdev;
 
 	err = volume_sysfs_init(ubi, vol);
 	if (err) {
 		cdev_del(&vol->cdev);
-		err = ubi_destroy_gluebi(vol);
 		volume_sysfs_close(vol);
 		return err;
 	}
 
-	paranoid_check_volumes(ubi);
-	return 0;
+	self_check_volumes(ubi);
+	return err;
 
-out_gluebi:
-	err = ubi_destroy_gluebi(vol);
 out_cdev:
 	cdev_del(&vol->cdev);
 	return err;
@@ -666,22 +681,21 @@
  */
 void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
 {
-	dbg_msg("free volume %d", vol->vol_id);
+	dbg_gen("free volume %d", vol->vol_id);
 
 	ubi->volumes[vol->vol_id] = NULL;
-	ubi_destroy_gluebi(vol);
 	cdev_del(&vol->cdev);
 	volume_sysfs_close(vol);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
 /**
- * paranoid_check_volume - check volume information.
+ * self_check_volume - check volume information.
  * @ubi: UBI device description object
  * @vol_id: volume ID
+ *
+ * Returns zero if the volume is all right and a negative error code if not.
  */
-static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
 {
 	int idx = vol_id2idx(ubi, vol_id);
 	int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -699,16 +713,7 @@
 			goto fail;
 		}
 		spin_unlock(&ubi->volumes_lock);
-		return;
-	}
-
-	if (vol->exclusive) {
-		/*
-		 * The volume may be being created at the moment, do not check
-		 * it (e.g., it may be in the middle of ubi_create_volume().
-		 */
-		spin_unlock(&ubi->volumes_lock);
-		return;
+		return 0;
 	}
 
 	if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
@@ -740,7 +745,7 @@
 	}
 
 	if (vol->upd_marker && vol->corrupted) {
-		dbg_err("update marker and corrupted simultaneously");
+		ubi_err("update marker and corrupted simultaneously");
 		goto fail;
 	}
 
@@ -760,11 +765,6 @@
 		goto fail;
 	}
 
-	if (!vol->name) {
-		ubi_err("NULL volume name");
-		goto fail;
-	}
-
 	n = strnlen(vol->name, vol->name_len + 1);
 	if (n != vol->name_len) {
 		ubi_err("bad name_len %lld", n);
@@ -818,31 +818,42 @@
 
 	if (alignment != vol->alignment || data_pad != vol->data_pad ||
 	    upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
-	    name_len!= vol->name_len || strncmp(name, vol->name, name_len)) {
+	    name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
 		ubi_err("volume info is different");
 		goto fail;
 	}
 
 	spin_unlock(&ubi->volumes_lock);
-	return;
+	return 0;
 
 fail:
-	ubi_err("paranoid check failed for volume %d", vol_id);
-	ubi_dbg_dump_vol_info(vol);
-	ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+	ubi_err("self-check failed for volume %d", vol_id);
+	if (vol)
+		ubi_dump_vol_info(vol);
+	ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+	dump_stack();
 	spin_unlock(&ubi->volumes_lock);
-	BUG();
+	return -EINVAL;
 }
 
 /**
- * paranoid_check_volumes - check information about all volumes.
+ * self_check_volumes - check information about all volumes.
  * @ubi: UBI device description object
+ *
+ * Returns zero if volumes are all right and a negative error code if not.
  */
-static void paranoid_check_volumes(struct ubi_device *ubi)
+static int self_check_volumes(struct ubi_device *ubi)
 {
-	int i;
+	int i, err = 0;
 
-	for (i = 0; i < ubi->vtbl_slots; i++)
-		paranoid_check_volume(ubi, i);
+	if (!ubi_dbg_chk_gen(ubi))
+		return 0;
+
+	for (i = 0; i < ubi->vtbl_slots; i++) {
+		err = self_check_volume(ubi, i);
+		if (err)
+			break;
+	}
+
+	return err;
 }
-#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 3fbb4a0..e6c8f5b 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -25,16 +25,15 @@
  * LEB 1. This scheme guarantees recoverability from unclean reboots.
  *
  * In this UBI implementation the on-flash volume table does not contain any
- * information about how many data static volumes contain. This information may
- * be found from the scanning data.
+ * information about how much data static volumes contain.
  *
  * But it would still be beneficial to store this information in the volume
  * table. For example, suppose we have a static volume X, and all its physical
  * eraseblocks became bad for some reason. Suppose we are attaching the
- * corresponding MTD device, the scanning has found no logical eraseblocks
+ * corresponding MTD device, for some reason we find no logical eraseblocks
  * corresponding to the volume X. According to the volume table volume X does
  * exist. So we don't know whether it is just empty or all its physical
- * eraseblocks went bad. So we cannot alarm the user about this corruption.
+ * eraseblocks went bad. So we cannot alarm the user properly.
  *
  * The volume table also stores so-called "update marker", which is used for
  * volume updates. Before updating the volume, the update marker is set, and
@@ -44,20 +43,20 @@
  * damaged.
  */
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/crc32.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <asm/div64.h>
+#else
+#include <ubi_uboot.h>
 #endif
 
-#include <ubi_uboot.h>
+#include <linux/err.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static void paranoid_vtbl_check(const struct ubi_device *ubi);
-#else
-#define paranoid_vtbl_check(ubi)
-#endif
+static void self_vtbl_check(const struct ubi_device *ubi);
 
 /* Empty volume table record */
 static struct ubi_vtbl_record empty_vtbl_record;
@@ -97,18 +96,68 @@
 			return err;
 
 		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
-					ubi->vtbl_size, UBI_LONGTERM);
+					ubi->vtbl_size);
 		if (err)
 			return err;
 	}
 
-	paranoid_vtbl_check(ubi);
+	self_vtbl_check(ubi);
 	return 0;
 }
 
 /**
- * vtbl_check - check if volume table is not corrupted and contains sensible
- *              data.
+ * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names multiple volumes specified in @rename_list in the
+ * volume table. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+			    struct list_head *rename_list)
+{
+	int i, err;
+	struct ubi_rename_entry *re;
+	struct ubi_volume *layout_vol;
+
+	list_for_each_entry(re, rename_list, list) {
+		uint32_t crc;
+		struct ubi_volume *vol = re->desc->vol;
+		struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
+
+		if (re->remove) {
+			memcpy(vtbl_rec, &empty_vtbl_record,
+			       sizeof(struct ubi_vtbl_record));
+			continue;
+		}
+
+		vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
+		memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
+		memset(vtbl_rec->name + re->new_name_len, 0,
+		       UBI_VOL_NAME_MAX + 1 - re->new_name_len);
+		crc = crc32(UBI_CRC32_INIT, vtbl_rec,
+			    UBI_VTBL_RECORD_SIZE_CRC);
+		vtbl_rec->crc = cpu_to_be32(crc);
+	}
+
+	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+		err = ubi_eba_unmap_leb(ubi, layout_vol, i);
+		if (err)
+			return err;
+
+		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
+					ubi->vtbl_size);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
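
Each record's CRC covers the first UBI_VTBL_RECORD_SIZE_CRC bytes, i.e.
everything but the trailing crc field itself, which is why the loop above
must recompute it after touching the name. The per-record step in
isolation (sketch):

	static void vtbl_rec_refresh_crc(struct ubi_vtbl_record *vtbl_rec)
	{
		uint32_t crc = crc32(UBI_CRC32_INIT, vtbl_rec,
				     UBI_VTBL_RECORD_SIZE_CRC);

		vtbl_rec->crc = cpu_to_be32(crc);
	}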
+
+/**
+ * vtbl_check - check if volume table is not corrupted and sensible.
  * @ubi: UBI device description object
  * @vtbl: volume table
  *
@@ -132,13 +181,13 @@
 		upd_marker = vtbl[i].upd_marker;
 		vol_type = vtbl[i].vol_type;
 		name_len = be16_to_cpu(vtbl[i].name_len);
-		name = (const char *) &vtbl[i].name[0];
+		name = &vtbl[i].name[0];
 
 		crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
 		if (be32_to_cpu(vtbl[i].crc) != crc) {
 			ubi_err("bad CRC at record %u: %#08x, not %#08x",
 				 i, crc, be32_to_cpu(vtbl[i].crc));
-			ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+			ubi_dump_vtbl_record(&vtbl[i], i);
 			return 1;
 		}
 
@@ -170,7 +219,7 @@
 
 		n = ubi->leb_size % alignment;
 		if (data_pad != n) {
-			dbg_err("bad data_pad, has to be %d", n);
+			ubi_err("bad data_pad, has to be %d", n);
 			err = 6;
 			goto bad;
 		}
@@ -186,8 +235,8 @@
 		}
 
 		if (reserved_pebs > ubi->good_peb_count) {
-			dbg_err("too large reserved_pebs, good PEBs %d",
-				ubi->good_peb_count);
+			ubi_err("too large reserved_pebs %d, good PEBs %d",
+				reserved_pebs, ubi->good_peb_count);
 			err = 9;
 			goto bad;
 		}
@@ -215,11 +264,15 @@
 			int len2 = be16_to_cpu(vtbl[n].name_len);
 
 			if (len1 > 0 && len1 == len2 &&
-			    !strncmp((char *)vtbl[i].name, (char *)vtbl[n].name, len1)) {
-				ubi_err("volumes %d and %d have the same name"
-					" \"%s\"", i, n, vtbl[i].name);
-				ubi_dbg_dump_vtbl_record(&vtbl[i], i);
-				ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+#ifndef __UBOOT__
+			    !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
+#else
+			    !strncmp((char *)vtbl[i].name, vtbl[n].name, len1)) {
+#endif
+				ubi_err("volumes %d and %d have the same name \"%s\"",
+					i, n, vtbl[i].name);
+				ubi_dump_vtbl_record(&vtbl[i], i);
+				ubi_dump_vtbl_record(&vtbl[n], n);
 				return -EINVAL;
 			}
 		}
@@ -229,76 +282,64 @@
 
 bad:
 	ubi_err("volume table check failed: record %d, error %d", i, err);
-	ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+	ubi_dump_vtbl_record(&vtbl[i], i);
 	return -EINVAL;
 }
 
 /**
  * create_vtbl - create a copy of volume table.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  * @copy: number of the volume table copy
  * @vtbl: contents of the volume table
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		       int copy, void *vtbl)
 {
 	int err, tries = 0;
-	static struct ubi_vid_hdr *vid_hdr;
-	struct ubi_scan_volume *sv;
-	struct ubi_scan_leb *new_seb, *old_seb = NULL;
+	struct ubi_vid_hdr *vid_hdr;
+	struct ubi_ainf_peb *new_aeb;
 
-	ubi_msg("create volume table (copy #%d)", copy + 1);
+	dbg_gen("create volume table (copy #%d)", copy + 1);
 
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
 	if (!vid_hdr)
 		return -ENOMEM;
 
-	/*
-	 * Check if there is a logical eraseblock which would have to contain
-	 * this volume table copy was found during scanning. It has to be wiped
-	 * out.
-	 */
-	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
-	if (sv)
-		old_seb = ubi_scan_find_seb(sv, copy);
-
 retry:
-	new_seb = ubi_scan_get_free_peb(ubi, si);
-	if (IS_ERR(new_seb)) {
-		err = PTR_ERR(new_seb);
+	new_aeb = ubi_early_get_peb(ubi, ai);
+	if (IS_ERR(new_aeb)) {
+		err = PTR_ERR(new_aeb);
 		goto out_free;
 	}
 
-	vid_hdr->vol_type = UBI_VID_DYNAMIC;
+	vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
 	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
 	vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
 	vid_hdr->data_size = vid_hdr->used_ebs =
 			     vid_hdr->data_pad = cpu_to_be32(0);
 	vid_hdr->lnum = cpu_to_be32(copy);
-	vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
-	vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0);
+	vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
 
 	/* The EC header is already there, write the VID header */
-	err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+	err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
 	if (err)
 		goto write_error;
 
 	/* Write the layout volume contents */
-	err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+	err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
 	if (err)
 		goto write_error;
 
 	/*
-	 * And add it to the scanning information. Don't delete the old
-	 * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'.
+	 * And add it to the attaching information. Don't delete the old version
+	 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
 	 */
-	err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
-				vid_hdr, 0);
-	kfree(new_seb);
+	err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+	kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
 
@@ -308,10 +349,10 @@
 		 * Probably this physical eraseblock went bad, try to pick
 		 * another one.
 		 */
-		list_add_tail(&new_seb->u.list, &si->corr);
+		list_add(&new_aeb->u.list, &ai->erase);
 		goto retry;
 	}
-	kfree(new_seb);
+	kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 out_free:
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
@@ -321,20 +362,20 @@
 /**
  * process_lvol - process the layout volume.
  * @ubi: UBI device description object
- * @si: scanning information
- * @sv: layout volume scanning information
+ * @ai: attaching information
+ * @av: layout volume attaching information
  *
  * This function is responsible for reading the layout volume, ensuring it is
  * not corrupted, and recovering from corruptions if needed. Returns volume
  * table in case of success and a negative error code in case of failure.
  */
 static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
-					    struct ubi_scan_info *si,
-					    struct ubi_scan_volume *sv)
+					    struct ubi_attach_info *ai,
+					    struct ubi_ainf_volume *av)
 {
 	int err;
 	struct rb_node *rb;
-	struct ubi_scan_leb *seb;
+	struct ubi_ainf_peb *aeb;
 	struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
 	int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
 
@@ -356,25 +397,24 @@
 	 * 0 contains more recent information.
 	 *
 	 * So the plan is to first check LEB 0. Then
-	 * a. if LEB 0 is OK, it must be containing the most resent data; then
+	 * a. if LEB 0 is OK, it must contain the most recent data; then
 	 *    we compare it with LEB 1, and if they are different, we copy LEB
 	 *    0 to LEB 1;
 	 * b. if LEB 0 is corrupted but LEB 1 is OK, we copy LEB 1
 	 *    to LEB 0.
 	 */
 
-	dbg_msg("check layout volume");
+	dbg_gen("check layout volume");
 
 	/* Read both LEB 0 and LEB 1 into memory */
-	ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-		leb[seb->lnum] = vmalloc(ubi->vtbl_size);
-		if (!leb[seb->lnum]) {
+	ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+		leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+		if (!leb[aeb->lnum]) {
 			err = -ENOMEM;
 			goto out_free;
 		}
-		memset(leb[seb->lnum], 0, ubi->vtbl_size);
 
-		err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+		err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
 				       ubi->vtbl_size);
 		if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
 			/*
@@ -382,12 +422,12 @@
 			 * uncorrectable ECC error, but we have our own CRC and
 			 * the data will be checked later. If the data is OK,
 			 * the PEB will be scrubbed (because we set
-			 * seb->scrub). If the data is not OK, the contents of
+			 * aeb->scrub). If the data is not OK, the contents of
 			 * the PEB will be recovered from the second copy, and
-			 * seb->scrub will be cleared in
-			 * 'ubi_scan_add_used()'.
+			 * aeb->scrub will be cleared in
+			 * 'ubi_add_to_av()'.
 			 */
-			seb->scrub = 1;
+			aeb->scrub = 1;
 		else if (err)
 			goto out_free;
 	}
@@ -402,10 +442,11 @@
 	if (!leb_corrupted[0]) {
 		/* LEB 0 is OK */
 		if (leb[1])
-			leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size);
+			leb_corrupted[1] = memcmp(leb[0], leb[1],
+						  ubi->vtbl_size);
 		if (leb_corrupted[1]) {
 			ubi_warn("volume table copy #2 is corrupted");
-			err = create_vtbl(ubi, si, 1, leb[0]);
+			err = create_vtbl(ubi, ai, 1, leb[0]);
 			if (err)
 				goto out_free;
 			ubi_msg("volume table was restored");
@@ -428,7 +469,7 @@
 		}
 
 		ubi_warn("volume table copy #1 is corrupted");
-		err = create_vtbl(ubi, si, 0, leb[1]);
+		err = create_vtbl(ubi, ai, 0, leb[1]);
 		if (err)
 			goto out_free;
 		ubi_msg("volume table was restored");
@@ -446,21 +487,20 @@
 /**
  * create_empty_lvol - create empty layout volume.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * This function returns volume table contents in case of success and a
  * negative error code in case of failure.
  */
 static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
-						 struct ubi_scan_info *si)
+						 struct ubi_attach_info *ai)
 {
 	int i;
 	struct ubi_vtbl_record *vtbl;
 
-	vtbl = vmalloc(ubi->vtbl_size);
+	vtbl = vzalloc(ubi->vtbl_size);
 	if (!vtbl)
 		return ERR_PTR(-ENOMEM);
-	memset(vtbl, 0, ubi->vtbl_size);
 
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
@@ -468,7 +508,7 @@
 	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
 		int err;
 
-		err = create_vtbl(ubi, si, i, vtbl);
+		err = create_vtbl(ubi, ai, i, vtbl);
 		if (err) {
 			vfree(vtbl);
 			return ERR_PTR(err);
@@ -481,18 +521,19 @@
 /**
  * init_volumes - initialize volume information for existing volumes.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  * @vtbl: volume table
  *
  * This function allocates volume description objects for existing volumes.
  * Returns zero in case of success and a negative error code in case of
  * failure.
  */
-static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+static int init_volumes(struct ubi_device *ubi,
+			const struct ubi_attach_info *ai,
 			const struct ubi_vtbl_record *vtbl)
 {
 	int i, reserved_pebs = 0;
-	struct ubi_scan_volume *sv;
+	struct ubi_ainf_volume *av;
 	struct ubi_volume *vol;
 
 	for (i = 0; i < ubi->vtbl_slots; i++) {
@@ -520,8 +561,8 @@
 		if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
 			/* Auto re-size flag may be set only for one volume */
 			if (ubi->autoresize_vol_id != -1) {
-				ubi_err("more then one auto-resize volume (%d "
-					"and %d)", ubi->autoresize_vol_id, i);
+				ubi_err("more than one auto-resize volume (%d and %d)",
+					ubi->autoresize_vol_id, i);
 				kfree(vol);
 				return -EINVAL;
 			}
@@ -548,8 +589,8 @@
 		}
 
 		/* Static volumes only */
-		sv = ubi_scan_find_sv(si, i);
-		if (!sv) {
+		av = ubi_find_av(ai, i);
+		if (!av) {
 			/*
 			 * No eraseblocks belonging to this volume found. We
 			 * don't actually know whether this static volume is
@@ -561,22 +602,22 @@
 			continue;
 		}
 
-		if (sv->leb_count != sv->used_ebs) {
+		if (av->leb_count != av->used_ebs) {
 			/*
 			 * We found a static volume which misses several
 			 * eraseblocks. Treat it as corrupted.
 			 */
 			ubi_warn("static volume %d misses %d LEBs - corrupted",
-				 sv->vol_id, sv->used_ebs - sv->leb_count);
+				 av->vol_id, av->used_ebs - av->leb_count);
 			vol->corrupted = 1;
 			continue;
 		}
 
-		vol->used_ebs = sv->used_ebs;
+		vol->used_ebs = av->used_ebs;
 		vol->used_bytes =
 			(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
-		vol->used_bytes += sv->last_data_size;
-		vol->last_eb_bytes = sv->last_data_size;
+		vol->used_bytes += av->last_data_size;
+		vol->last_eb_bytes = av->last_data_size;
 	}
 
 	/* And add the layout volume */
@@ -585,7 +626,7 @@
 		return -ENOMEM;
 
 	vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
-	vol->alignment = 1;
+	vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
 	vol->vol_type = UBI_DYNAMIC_VOLUME;
 	vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
 	memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
@@ -603,9 +644,13 @@
 	ubi->vol_count += 1;
 	vol->ubi = ubi;
 
-	if (reserved_pebs > ubi->avail_pebs)
+	if (reserved_pebs > ubi->avail_pebs) {
 		ubi_err("not enough PEBs, required %d, available %d",
 			reserved_pebs, ubi->avail_pebs);
+		if (ubi->corr_peb_count)
+			ubi_err("%d PEBs are corrupted and not used",
+				ubi->corr_peb_count);
+	}
 	ubi->rsvd_pebs += reserved_pebs;
 	ubi->avail_pebs -= reserved_pebs;
 
@@ -613,105 +658,104 @@
 }
 
 /**
- * check_sv - check volume scanning information.
+ * check_av - check volume attaching information.
  * @vol: UBI volume description object
- * @sv: volume scanning information
+ * @av: volume attaching information
  *
- * This function returns zero if the volume scanning information is consistent
+ * This function returns zero if the volume attaching information is consistent
 * with the data read from the volume table, and %-EINVAL if not.
  */
-static int check_sv(const struct ubi_volume *vol,
-		    const struct ubi_scan_volume *sv)
+static int check_av(const struct ubi_volume *vol,
+		    const struct ubi_ainf_volume *av)
 {
 	int err;
 
-	if (sv->highest_lnum >= vol->reserved_pebs) {
+	if (av->highest_lnum >= vol->reserved_pebs) {
 		err = 1;
 		goto bad;
 	}
-	if (sv->leb_count > vol->reserved_pebs) {
+	if (av->leb_count > vol->reserved_pebs) {
 		err = 2;
 		goto bad;
 	}
-	if (sv->vol_type != vol->vol_type) {
+	if (av->vol_type != vol->vol_type) {
 		err = 3;
 		goto bad;
 	}
-	if (sv->used_ebs > vol->reserved_pebs) {
+	if (av->used_ebs > vol->reserved_pebs) {
 		err = 4;
 		goto bad;
 	}
-	if (sv->data_pad != vol->data_pad) {
+	if (av->data_pad != vol->data_pad) {
 		err = 5;
 		goto bad;
 	}
 	return 0;
 
 bad:
-	ubi_err("bad scanning information, error %d", err);
-	ubi_dbg_dump_sv(sv);
-	ubi_dbg_dump_vol_info(vol);
+	ubi_err("bad attaching information, error %d", err);
+	ubi_dump_av(av);
+	ubi_dump_vol_info(vol);
 	return -EINVAL;
 }
 
 /**
- * check_scanning_info - check that scanning information.
+ * check_attaching_info - check the attaching information.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * Even though we protect on-flash data by CRC checksums, we still don't trust
- * the media. This function ensures that scanning information is consistent to
- * the information read from the volume table. Returns zero if the scanning
+ * the media. This function ensures that the attaching information is consistent
+ * with the information read from the volume table. Returns zero if the attaching
  * information is OK and %-EINVAL if it is not.
  */
-static int check_scanning_info(const struct ubi_device *ubi,
-			       struct ubi_scan_info *si)
+static int check_attaching_info(const struct ubi_device *ubi,
+			       struct ubi_attach_info *ai)
 {
 	int err, i;
-	struct ubi_scan_volume *sv;
+	struct ubi_ainf_volume *av;
 	struct ubi_volume *vol;
 
-	if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
-		ubi_err("scanning found %d volumes, maximum is %d + %d",
-			si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+	if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+		ubi_err("found %d volumes while attaching, maximum is %d + %d",
+			ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
 		return -EINVAL;
 	}
 
-	if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
-	    si->highest_vol_id < UBI_INTERNAL_VOL_START) {
-		ubi_err("too large volume ID %d found by scanning",
-			si->highest_vol_id);
+	if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+	    ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+		ubi_err("too large volume ID %d found", ai->highest_vol_id);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
 		cond_resched();
 
-		sv = ubi_scan_find_sv(si, i);
+		av = ubi_find_av(ai, i);
 		vol = ubi->volumes[i];
 		if (!vol) {
-			if (sv)
-				ubi_scan_rm_volume(si, sv);
+			if (av)
+				ubi_remove_av(ai, av);
 			continue;
 		}
 
 		if (vol->reserved_pebs == 0) {
 			ubi_assert(i < ubi->vtbl_slots);
 
-			if (!sv)
+			if (!av)
 				continue;
 
 			/*
-			 * During scanning we found a volume which does not
+			 * During attaching we found a volume which does not
 			 * exist according to the information in the volume
 			 * table. This must have happened due to an unclean
 			 * reboot while the volume was being removed. Discard
 			 * these eraseblocks.
 			 */
-			ubi_msg("finish volume %d removal", sv->vol_id);
-			ubi_scan_rm_volume(si, sv);
-		} else if (sv) {
-			err = check_sv(vol, sv);
+			ubi_msg("finish volume %d removal", av->vol_id);
+			ubi_remove_av(ai, av);
+		} else if (av) {
+			err = check_av(vol, av);
 			if (err)
 				return err;
 		}
@@ -721,19 +765,18 @@
 }
 
 /**
- * ubi_read_volume_table - read volume table.
- * information.
+ * ubi_read_volume_table - read the volume table.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
 * This function reads the volume table, checks it, recovers from errors if needed,
  * or creates it if needed. Returns zero in case of success and a negative
  * error code in case of failure.
  */
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
 	int i, err;
-	struct ubi_scan_volume *sv;
+	struct ubi_ainf_volume *av;
 
 	empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
 
@@ -748,8 +791,8 @@
 	ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
 	ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
 
-	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
-	if (!sv) {
+	av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+	if (!av) {
 		/*
 		 * No logical eraseblocks belonging to the layout volume were
 		 * found. This could mean that the flash is just empty. In
@@ -758,8 +801,8 @@
 		 * But if flash is not empty this must be a corruption or the
 		 * MTD device just contains garbage.
 		 */
-		if (si->is_empty) {
-			ubi->vtbl = create_empty_lvol(ubi, si);
+		if (ai->is_empty) {
+			ubi->vtbl = create_empty_lvol(ubi, ai);
 			if (IS_ERR(ubi->vtbl))
 				return PTR_ERR(ubi->vtbl);
 		} else {
@@ -767,33 +810,33 @@
 			return -EINVAL;
 		}
 	} else {
-		if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+		if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
 			/* This must not happen with proper UBI images */
-			dbg_err("too many LEBs (%d) in layout volume",
-				sv->leb_count);
+			ubi_err("too many LEBs (%d) in layout volume",
+				av->leb_count);
 			return -EINVAL;
 		}
 
-		ubi->vtbl = process_lvol(ubi, si, sv);
+		ubi->vtbl = process_lvol(ubi, ai, av);
 		if (IS_ERR(ubi->vtbl))
 			return PTR_ERR(ubi->vtbl);
 	}
 
-	ubi->avail_pebs = ubi->good_peb_count;
+	ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
 
 	/*
 	 * The layout volume is OK, initialize the corresponding in-RAM data
 	 * structures.
 	 */
-	err = init_volumes(ubi, si, ubi->vtbl);
+	err = init_volumes(ubi, ai, ubi->vtbl);
 	if (err)
 		goto out_free;
 
 	/*
-	 * Get sure that the scanning information is consistent to the
+	 * Make sure that the attaching information is consistent with the
 	 * information stored in the volume table.
 	 */
-	err = check_scanning_info(ubi, si);
+	err = check_attaching_info(ubi, ai);
 	if (err)
 		goto out_free;
 
@@ -801,26 +844,24 @@
 
 out_free:
 	vfree(ubi->vtbl);
-	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
-		if (ubi->volumes[i]) {
-			kfree(ubi->volumes[i]);
-			ubi->volumes[i] = NULL;
-		}
+	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+		kfree(ubi->volumes[i]);
+		ubi->volumes[i] = NULL;
+	}
 	return err;
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
 /**
- * paranoid_vtbl_check - check volume table.
+ * self_vtbl_check - check volume table.
  * @ubi: UBI device description object
  */
-static void paranoid_vtbl_check(const struct ubi_device *ubi)
+static void self_vtbl_check(const struct ubi_device *ubi)
 {
+	if (!ubi_dbg_chk_gen(ubi))
+		return;
+
 	if (vtbl_check(ubi, ubi->vtbl)) {
-		ubi_err("paranoid check failed");
+		ubi_err("self-check failed");
 		BUG();
 	}
 }
-
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 1eaa88b..2987ffc 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -7,97 +7,117 @@
  */
 
 /*
- * UBI wear-leveling unit.
+ * UBI wear-leveling sub-system.
  *
- * This unit is responsible for wear-leveling. It works in terms of physical
- * eraseblocks and erase counters and knows nothing about logical eraseblocks,
- * volumes, etc. From this unit's perspective all physical eraseblocks are of
- * two types - used and free. Used physical eraseblocks are those that were
- * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
- * those that were put by the 'ubi_wl_put_peb()' function.
+ * This sub-system is responsible for wear-leveling. It works in terms of
+ * physical eraseblocks and erase counters and knows nothing about logical
+ * eraseblocks, volumes, etc. From this sub-system's perspective all physical
+ * eraseblocks are of two types - used and free. Used physical eraseblocks are
+ * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
+ * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
  *
  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter
- * header. The rest of the physical eraseblock contains only 0xFF bytes.
+ * header. The rest of the physical eraseblock contains only %0xFF bytes.
  *
- * When physical eraseblocks are returned to the WL unit by means of the
+ * When physical eraseblocks are returned to the WL sub-system by means of the
  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
  * done asynchronously in context of the per-UBI device background thread,
- * which is also managed by the WL unit.
+ * which is also managed by the WL sub-system.
  *
  * The wear-leveling is ensured by means of moving the contents of used
  * physical eraseblocks with low erase counter to free physical eraseblocks
  * with high erase counter.
  *
- * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
- * an "optimal" physical eraseblock. For example, when it is known that the
- * physical eraseblock will be "put" soon because it contains short-term data,
- * the WL unit may pick a free physical eraseblock with low erase counter, and
- * so forth.
+ * If the WL sub-system fails to erase a physical eraseblock, it marks it as
+ * bad.
  *
- * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
+ * This sub-system is also responsible for scrubbing. If a bit-flip is detected
+ * in a physical eraseblock, it has to be moved. Technically this is the same
+ * as moving it for wear-leveling reasons.
  *
- * This unit is also responsible for scrubbing. If a bit-flip is detected in a
- * physical eraseblock, it has to be moved. Technically this is the same as
- * moving it for wear-leveling reasons.
+ * As it was said, for the UBI sub-system all physical eraseblocks are either
+ * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
  *
- * As it was said, for the UBI unit all physical eraseblocks are either "free"
- * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used
- * eraseblocks are kept in a set of different RB-trees: @wl->used,
- * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ *  o we don't want to move physical eraseblocks just after we have given them
+ *    to the user; instead, we first want to let users fill them up with data;
+ *
+ *  o there is a chance that the user will put the physical eraseblock very
+ *    soon, so it makes sense not to move it for some time, but wait.
+ *
+ * Physical eraseblocks stay protected only for limited time. But the "time" is
+ * measured in erase cycles in this case. This is implemented with help of the
+ * protection queue. Eraseblocks are put to the tail of this queue when they
+ * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines how many (global) erase cycles PEBs are protected.
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up into several sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ *   erroneous - e.g., there was a read error;
+ * o the WL movement is temporarily prohibited (@wl->pq queue);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
  *
  * Note, in this implementation, we keep a small in-RAM object for each physical
  * eraseblock. This is surely not a scalable solution. But it appears to be good
  * enough for moderately large flashes and it is simple. In future, one may
- * re-work this unit and make it more scalable.
+ * re-work this sub-system and make it more scalable.
  *
- * At the moment this unit does not utilize the sequence number, which was
- * introduced relatively recently. But it would be wise to do this because the
- * sequence number of a logical eraseblock characterizes how old is it. For
+ * At the moment this sub-system does not utilize the sequence number, which
+ * was introduced relatively recently. But it would be wise to do this because
+ * the sequence number of a logical eraseblock characterizes how old it is. For
  * example, when we move a PEB with low erase counter, and we need to pick the
  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick target PEB with an average EC if our PEB is not very "old". This is
- * room for future re-works of the WL unit.
- *
- * FIXME: looks too complex, should be simplified (later).
+ * room for future re-works of the WL sub-system.
  */
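
The state machine described in the comment above can be restated as a small
stand-alone C model. This is purely illustrative: none of these enum names
exist in UBI, they just mirror the free/used sub-states and the trees/queue
listed above.

#include <stdio.h>

/* Hypothetical model of the PEB states described above; the
 * enum names are illustrative, not UBI identifiers. */
enum peb_state {
	PEB_FREE,      /* @wl->free tree: may be handed out */
	PEB_PROTECTED, /* @wl->pq queue: movement temporarily prohibited */
	PEB_USED,      /* @wl->used tree: WL movement allowed */
	PEB_ERRONEOUS, /* @wl->erroneous tree: movement disallowed */
	PEB_SCRUB,     /* @wl->scrub tree: must be scrubbed */
};

int main(void)
{
	/* Typical life cycle of one PEB: handed out protected, aged
	 * into the used tree, and back to free after an erase. */
	enum peb_state life[] = { PEB_FREE, PEB_PROTECTED, PEB_USED,
				  PEB_FREE };
	const char *name[] = { "free", "protected", "used",
			       "erroneous", "scrub" };

	for (unsigned i = 0; i < sizeof(life) / sizeof(life[0]); i++)
		printf("state %u: %s\n", i, name[life[i]]);
	return 0;
}
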
 
-#ifdef UBI_LINUX
+#define __UBOOT__
+#ifndef __UBOOT__
 #include <linux/slab.h>
 #include <linux/crc32.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#else
+#include <ubi_uboot.h>
 #endif
 
-#include <ubi_uboot.h>
 #include "ubi.h"
 
 /* Number of physical eraseblocks reserved for wear-leveling purposes */
 #define WL_RESERVED_PEBS 1
 
 /*
- * How many erase cycles are short term, unknown, and long term physical
- * eraseblocks protected.
- */
-#define ST_PROTECTION 16
-#define U_PROTECTION  10
-#define LT_PROTECTION 4
-
-/*
  * Maximum difference between two erase counters. If this threshold is
- * exceeded, the WL unit starts moving data from used physical eraseblocks with
- * low erase counter to free physical eraseblocks with high erase counter.
+ * exceeded, the WL sub-system starts moving data from used physical
+ * eraseblocks with low erase counter to free physical eraseblocks with high
+ * erase counter.
  */
 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
 
 /*
- * When a physical eraseblock is moved, the WL unit has to pick the target
+ * When a physical eraseblock is moved, the WL sub-system has to pick the target
  * physical eraseblock to move to. The simplest way would be just to pick the
  * one with the highest erase counter. But in certain workloads this could lead
  * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a
  * situation when the picked physical eraseblock is constantly erased after the
  * data is written to it. So, we have a constant which limits the highest erase
- * counter of the free physical eraseblock to pick. Namely, the WL unit does
- * not pick eraseblocks with erase counter greater then the lowest erase
+ * counter of the free physical eraseblock to pick. Namely, the WL sub-system
+ * does not pick eraseblocks with erase counter greater than the lowest erase
  * counter plus %WL_FREE_MAX_DIFF.
  */
 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
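
To make these two thresholds concrete, here is a small worked example. It
assumes the common Kconfig default of 4096 for CONFIG_MTD_UBI_WL_THRESHOLD;
the erase counter values are made up.

#include <stdio.h>

#define UBI_WL_THRESHOLD 4096		/* assumed Kconfig default */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

int main(void)
{
	int min_used_ec = 100;	/* least worn used PEB */
	int max_free_ec = 5000;	/* most worn free PEB */
	int min_free_ec = 900;	/* least worn free PEB */

	/* The trigger the WL worker uses: move data once the gap
	 * between used and free erase counters is large enough. */
	if (max_free_ec - min_used_ec >= UBI_WL_THRESHOLD)
		printf("gap %d >= %d: wear-leveling kicks in\n",
		       max_free_ec - min_used_ec, UBI_WL_THRESHOLD);

	/* The cap on target selection: free PEBs with an erase
	 * counter at or above min + WL_FREE_MAX_DIFF are not picked. */
	printf("free PEBs with EC >= %d are skipped\n",
	       min_free_ec + WL_FREE_MAX_DIFF);
	return 0;
}
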
@@ -108,89 +128,48 @@
  */
 #define WL_MAX_FAILURES 32
 
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+				 struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+			    struct ubi_wl_entry *e);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+#ifndef __UBOOT__
 /**
- * struct ubi_wl_prot_entry - PEB protection entry.
- * @rb_pnum: link in the @wl->prot.pnum RB-tree
- * @rb_aec: link in the @wl->prot.aec RB-tree
- * @abs_ec: the absolute erase counter value when the protection ends
- * @e: the wear-leveling entry of the physical eraseblock under protection
- *
- * When the WL unit returns a physical eraseblock, the physical eraseblock is
- * protected from being moved for some "time". For this reason, the physical
- * eraseblock is not directly moved from the @wl->free tree to the @wl->used
- * tree. There is one more tree in between where this physical eraseblock is
- * temporarily stored (@wl->prot).
- *
- * All this protection stuff is needed because:
- *  o we don't want to move physical eraseblocks just after we have given them
- *    to the user; instead, we first want to let users fill them up with data;
- *
- *  o there is a chance that the user will put the physical eraseblock very
- *    soon, so it makes sense not to move it for some time, but wait; this is
- *    especially important in case of "short term" physical eraseblocks.
- *
- * Physical eraseblocks stay protected only for limited time. But the "time" is
- * measured in erase cycles in this case. This is implemented with help of the
- * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
- * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
- * the @wl->used tree.
- *
- * Protected physical eraseblocks are searched by physical eraseblock number
- * (when they are put) and by the absolute erase counter (to check if it is
- * time to move them to the @wl->used tree). So there are actually 2 RB-trees
- * storing the protected physical eraseblocks: @wl->prot.pnum and
- * @wl->prot.aec. They are referred to as the "protection" trees. The
- * first one is indexed by the physical eraseblock number. The second one is
- * indexed by the absolute erase counter. Both trees store
- * &struct ubi_wl_prot_entry objects.
- *
- * Each physical eraseblock has 2 main states: free and used. The former state
- * corresponds to the @wl->free tree. The latter state is split up on several
- * sub-states:
- * o the WL movement is allowed (@wl->used tree);
- * o the WL movement is temporarily prohibited (@wl->prot.pnum and
- * @wl->prot.aec trees);
- * o scrubbing is needed (@wl->scrub tree).
- *
- * Depending on the sub-state, wear-leveling entries of the used physical
- * eraseblocks may be kept in one of those trees.
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
  */
-struct ubi_wl_prot_entry {
-	struct rb_node rb_pnum;
-	struct rb_node rb_aec;
-	unsigned long long abs_ec;
-	struct ubi_wl_entry *e;
-};
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+	ubi_update_fastmap(ubi);
+}
+#endif
 
 /**
- * struct ubi_work - UBI work description data structure.
- * @list: a link in the list of pending works
- * @func: worker function
- * @priv: private data of the worker function
- *
- * @e: physical eraseblock to erase
- * @torture: if the physical eraseblock has to be tortured
- *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
- * case of failure.
+ *  ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ *  @ubi: UBI device description object
+ *  @pnum: the PEB to be checked
  */
-struct ubi_work {
-	struct list_head list;
-	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
-	/* The below fields are only relevant to erasure works */
-	struct ubi_wl_entry *e;
-	int torture;
-};
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+	int i;
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
-				     struct rb_root *root);
+	if (!ubi->fm)
+		return 0;
+
+	for (i = 0; i < ubi->fm->used_blocks; i++)
+		if (ubi->fm->e[i]->pnum == pnum)
+			return 1;
+
+	return 0;
+}
 #else
-#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(e, root)
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -210,7 +189,7 @@
 		struct ubi_wl_entry *e1;
 
 		parent = *p;
-		e1 = rb_entry(parent, struct ubi_wl_entry, rb);
+		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
 
 		if (e->ec < e1->ec)
 			p = &(*p)->rb_left;
@@ -225,8 +204,8 @@
 		}
 	}
 
-	rb_link_node(&e->rb, parent, p);
-	rb_insert_color(&e->rb, root);
+	rb_link_node(&e->u.rb, parent, p);
+	rb_insert_color(&e->u.rb, root);
 }
 
 /**
@@ -289,18 +268,16 @@
 {
 	int err;
 
-	spin_lock(&ubi->wl_lock);
 	while (!ubi->free.rb_node) {
 		spin_unlock(&ubi->wl_lock);
 
 		dbg_wl("do one work synchronously");
 		err = do_work(ubi);
-		if (err)
-			return err;
 
 		spin_lock(&ubi->wl_lock);
+		if (err)
+			return err;
 	}
-	spin_unlock(&ubi->wl_lock);
 
 	return 0;
 }
@@ -321,7 +298,7 @@
 	while (p) {
 		struct ubi_wl_entry *e1;
 
-		e1 = rb_entry(p, struct ubi_wl_entry, rb);
+		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
 
 		if (e->pnum == e1->pnum) {
 			ubi_assert(e == e1);
@@ -345,223 +322,401 @@
 }
 
 /**
- * prot_tree_add - add physical eraseblock to protection trees.
+ * prot_queue_add - add physical eraseblock to the protection queue.
  * @ubi: UBI device description object
  * @e: the physical eraseblock to add
- * @pe: protection entry object to use
- * @abs_ec: absolute erase counter value when this physical eraseblock has
- * to be removed from the protection trees.
  *
- * @wl->lock has to be locked.
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
+ * be locked.
  */
-static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
-			  struct ubi_wl_prot_entry *pe, int abs_ec)
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
 {
-	struct rb_node **p, *parent = NULL;
-	struct ubi_wl_prot_entry *pe1;
+	int pq_tail = ubi->pq_head - 1;
 
-	pe->e = e;
-	pe->abs_ec = ubi->abs_ec + abs_ec;
-
-	p = &ubi->prot.pnum.rb_node;
-	while (*p) {
-		parent = *p;
-		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
-
-		if (e->pnum < pe1->e->pnum)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&pe->rb_pnum, parent, p);
-	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
-
-	p = &ubi->prot.aec.rb_node;
-	parent = NULL;
-	while (*p) {
-		parent = *p;
-		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
-
-		if (pe->abs_ec < pe1->abs_ec)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&pe->rb_aec, parent, p);
-	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
+	if (pq_tail < 0)
+		pq_tail = UBI_PROT_QUEUE_LEN - 1;
+	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
 }
 
 /**
  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
  * @root: the RB-tree where to look for
- * @max: highest possible erase counter
+ * @diff: maximum possible difference from the smallest erase counter
  *
  * This function looks for a wear leveling entry with erase counter closest to
- * @max and less then @max.
+ * min + @diff, where min is the smallest erase counter.
  */
-static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+					  struct rb_root *root, int diff)
 {
 	struct rb_node *p;
-	struct ubi_wl_entry *e;
+	struct ubi_wl_entry *e, *prev_e = NULL;
+	int max;
 
-	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
-	max += e->ec;
+	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+	max = e->ec + diff;
 
 	p = root->rb_node;
 	while (p) {
 		struct ubi_wl_entry *e1;
 
-		e1 = rb_entry(p, struct ubi_wl_entry, rb);
+		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
 		if (e1->ec >= max)
 			p = p->rb_left;
 		else {
 			p = p->rb_right;
+			prev_e = e;
 			e = e1;
 		}
 	}
 
+	/* If no fastmap has been written and this WL entry can be used
+	 * as anchor PEB, hold it back and return the second best WL entry
+	 * such that fastmap can use the anchor PEB later. */
+	if (prev_e && !ubi->fm_disabled &&
+	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
+		return prev_e;
+
 	return e;
 }
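
Stripped of the RB-tree and the fastmap anchor special case, the selection
rule above is "largest erase counter strictly below min + @diff". A minimal
stand-alone sketch over a sorted array shows the same idea; pick_ec() is a
hypothetical helper, not UBI code.

#include <stdio.h>

/* Given erase counters sorted ascending, return the index of the
 * largest EC still strictly below ec[0] + diff - the same rule as
 * find_wl_entry(), minus the tree walk and the anchor case. */
static int pick_ec(const int *ec, int n, int diff)
{
	int max = ec[0] + diff;
	int best = 0;

	for (int i = 1; i < n; i++)
		if (ec[i] < max)
			best = i;
	return best;
}

int main(void)
{
	int ec[] = { 10, 40, 90, 8000, 9000 };

	/* max = 10 + 8192 = 8202; EC 9000 is over the cap, so the
	 * most worn qualifying entry is EC 8000. */
	printf("picked EC %d\n", ec[pick_ec(ec, 5, 8192)]);
	return 0;
}
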
 
 /**
- * ubi_wl_get_peb - get a physical eraseblock.
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
  * @ubi: UBI device description object
- * @dtype: type of data which will be stored in this physical eraseblock
+ * @root: the RB-tree where to look for
  *
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure. Might sleep.
+ * This function looks for a wear leveling entry with a medium erase counter,
+ * but not greater than or equal to the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
  */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+					       struct rb_root *root)
 {
-	int err, protect, medium_ec;
 	struct ubi_wl_entry *e, *first, *last;
-	struct ubi_wl_prot_entry *pe;
 
-	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
-		   dtype == UBI_UNKNOWN);
+	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
 
-	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
-	if (!pe)
-		return -ENOMEM;
+	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
 
-retry:
-	spin_lock(&ubi->wl_lock);
-	if (!ubi->free.rb_node) {
-		if (ubi->works_count == 0) {
-			ubi_assert(list_empty(&ubi->works));
-			ubi_err("no free eraseblocks");
-			spin_unlock(&ubi->wl_lock);
-			kfree(pe);
-			return -ENOSPC;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+		/* If no fastmap has been written and this WL entry can be used
+		 * as anchor PEB, hold it back and return the second best
+		 * WL entry such that fastmap can use the anchor PEB later. */
+		if (e && !ubi->fm_disabled && !ubi->fm &&
+		    e->pnum < UBI_FM_MAX_START)
+			e = rb_entry(rb_next(root->rb_node),
+				     struct ubi_wl_entry, u.rb);
+#endif
+	} else
+		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+	return e;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to be used as the anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e, *victim = NULL;
+	int max_ec = UBI_MAX_ERASECOUNTER;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb) {
+		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+			victim = e;
+			max_ec = e->ec;
 		}
-		spin_unlock(&ubi->wl_lock);
-
-		err = produce_free_peb(ubi);
-		if (err < 0) {
-			kfree(pe);
-			return err;
-		}
-		goto retry;
 	}
 
-	switch (dtype) {
-		case UBI_LONGTERM:
-			/*
-			 * For long term data we pick a physical eraseblock
-			 * with high erase counter. But the highest erase
-			 * counter we can pick is bounded by the the lowest
-			 * erase counter plus %WL_FREE_MAX_DIFF.
-			 */
-			e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-			protect = LT_PROTECTION;
-			break;
-		case UBI_UNKNOWN:
-			/*
-			 * For unknown data we pick a physical eraseblock with
-			 * medium erase counter. But we by no means can pick a
-			 * physical eraseblock with erase counter greater or
-			 * equivalent than the lowest erase counter plus
-			 * %WL_FREE_MAX_DIFF.
-			 */
-			first = rb_entry(rb_first(&ubi->free),
-					 struct ubi_wl_entry, rb);
-			last = rb_entry(rb_last(&ubi->free),
-					struct ubi_wl_entry, rb);
+	return victim;
+}
 
-			if (last->ec - first->ec < WL_FREE_MAX_DIFF)
-				e = rb_entry(ubi->free.rb_node,
-						struct ubi_wl_entry, rb);
-			else {
-				medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
-				e = find_wl_entry(&ubi->free, medium_ec);
-			}
-			protect = U_PROTECTION;
-			break;
-		case UBI_SHORTTERM:
-			/*
-			 * For short term data we pick a physical eraseblock
-			 * with the lowest erase counter as we expect it will
-			 * be erased soon.
-			 */
-			e = rb_entry(rb_first(&ubi->free),
-				     struct ubi_wl_entry, rb);
-			protect = ST_PROTECTION;
-			break;
-		default:
-			protect = 0;
-			e = NULL;
-			BUG();
-	}
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e;
 
-	/*
-	 * Move the physical eraseblock to the protection trees where it will
-	 * be protected from being moved for some time.
-	 */
-	paranoid_check_in_wl_tree(e, &ubi->free);
-	rb_erase(&e->rb, &ubi->free);
-	prot_tree_add(ubi, e, pe, protect);
+	ubi_rb_for_each_entry(p, e, root, u.rb)
+		if (e->pnum < UBI_FM_MAX_START)
+			return 1;
 
-	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
-	spin_unlock(&ubi->wl_lock);
-
-	return e->pnum;
+	return 0;
 }
 
 /**
- * prot_tree_del - remove a physical eraseblock from the protection trees
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+	struct ubi_wl_entry *e = NULL;
+
+	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+		goto out;
+
+	if (anchor)
+		e = find_anchor_wl_entry(&ubi->free);
+	else
+		e = find_mean_wl_entry(ubi, &ubi->free);
+
+	if (!e)
+		goto out;
+
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/* remove it from the free list,
+	 * the wl subsystem no longer knows this erase block */
+	rb_erase(&e->u.rb, &ubi->free);
+	ubi->free_count--;
+out:
+	return e;
+}
+#endif
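
The anchor PEB must lie among the first physical eraseblocks of the device so
that attach can find the fastmap with a short bounded scan. A hedged
stand-alone sketch of the rule in find_anchor_wl_entry(), assuming the usual
on-media limit of 64 for UBI_FM_MAX_START and made-up free-PEB data:

#include <stdio.h>

#define UBI_FM_MAX_START 64	/* assumed on-media scan limit */

int main(void)
{
	/* Made-up free PEBs: physical numbers and erase counters. */
	int pnum[] = { 3, 17, 70, 200 };
	int ec[]   = { 90, 12,  5,   7 };
	int victim = -1, max_ec = 1 << 30;

	/* Same rule as find_anchor_wl_entry(): least worn PEB among
	 * those a fastmap super block may occupy. */
	for (int i = 0; i < 4; i++)
		if (pnum[i] < UBI_FM_MAX_START && ec[i] < max_ec) {
			victim = pnum[i];
			max_ec = ec[i];
		}
	printf("anchor PEB: %d\n", victim);	/* prints 17 */
	return 0;
}
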
+
+/**
+ * __wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ */
+static int __wl_get_peb(struct ubi_device *ubi)
+{
+	int err;
+	struct ubi_wl_entry *e;
+
+retry:
+	if (!ubi->free.rb_node) {
+		if (ubi->works_count == 0) {
+			ubi_err("no free eraseblocks");
+			ubi_assert(list_empty(&ubi->works));
+			return -ENOSPC;
+		}
+
+		err = produce_free_peb(ubi);
+		if (err < 0)
+			return err;
+		goto retry;
+	}
+
+	e = find_mean_wl_entry(ubi, &ubi->free);
+	if (!e) {
+		ubi_err("no free eraseblocks");
+		return -ENOSPC;
+	}
+
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/*
+	 * Move the physical eraseblock to the protection queue where it will
+	 * be protected from being moved for some time.
+	 */
+	rb_erase(&e->u.rb, &ubi->free);
+	ubi->free_count--;
+	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+#ifndef CONFIG_MTD_UBI_FASTMAP
+	/* We have to enqueue e only if fastmap is disabled,
+	 * if fastmap is enabled, prot_queue_add() will be called by
+	 * ubi_wl_get_peb() after removing e from the pool. */
+	prot_queue_add(ubi, e);
+#endif
+	return e->pnum;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+				    struct ubi_fm_pool *pool)
+{
+	int i;
+	struct ubi_wl_entry *e;
+
+	for (i = pool->used; i < pool->size; i++) {
+		e = ubi->lookuptbl[pool->pebs[i]];
+		wl_tree_add(e, &ubi->free);
+		ubi->free_count++;
+	}
+}
+
+/**
+ * refill_wl_pool - refills the fastmap pool used by the WL sub-system.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_pool(struct ubi_device *ubi)
+{
+	struct ubi_wl_entry *e;
+	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+
+	return_unused_pool_pebs(ubi, pool);
+
+	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+		if (!ubi->free.rb_node ||
+		   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+			break;
+
+		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+		self_check_in_wl_tree(ubi, e, &ubi->free);
+		rb_erase(&e->u.rb, &ubi->free);
+		ubi->free_count--;
+
+		pool->pebs[pool->size] = e->pnum;
+	}
+	pool->used = 0;
+}
+
+/**
+ * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb().
+ * @ubi: UBI device description object
+ */
+static void refill_wl_user_pool(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+
+	return_unused_pool_pebs(ubi, pool);
+
+	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+		pool->pebs[pool->size] = __wl_get_peb(ubi);
+		if (pool->pebs[pool->size] < 0)
+			break;
+	}
+	pool->used = 0;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+	spin_lock(&ubi->wl_lock);
+	refill_wl_pool(ubi);
+	refill_wl_user_pool(ubi);
+	spin_unlock(&ubi->wl_lock);
+}
+
+/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
+ * the fastmap pool.
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	int ret;
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
+	    wl_pool->used == wl_pool->size)
+		ubi_update_fastmap(ubi);
+
+	/* we did not get a single free PEB */
+	if (!pool->size)
+		ret = -ENOSPC;
+	else {
+		spin_lock(&ubi->wl_lock);
+		ret = pool->pebs[pool->used++];
+		prot_queue_add(ubi, ubi->lookuptbl[ret]);
+		spin_unlock(&ubi->wl_lock);
+	}
+
+	return ret;
+}
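
The control flow above boils down to: serve PEBs from a pre-filled pool and
rewrite the fastmap whenever a pool runs dry. A toy stand-alone model of that
flow follows; all names and the pool size of 4 are made up for illustration
(real pools are sized at attach time).

#include <stdio.h>

#define POOL_SIZE 4	/* made-up size */

struct pool {
	int pebs[POOL_SIZE];
	int size, used;
};

/* Stand-in for ubi_update_fastmap(): hand out fresh PEB numbers
 * and reset the pool. */
static void refill(struct pool *p)
{
	static int next_pnum = 100;

	for (p->size = 0; p->size < POOL_SIZE; p->size++)
		p->pebs[p->size] = next_pnum++;
	p->used = 0;
	printf("pool refilled (fastmap would be rewritten here)\n");
}

/* Mirrors ubi_wl_get_peb() above: refill a dry pool, then serve. */
static int get_peb(struct pool *p)
{
	if (p->used == p->size)
		refill(p);
	return p->pebs[p->used++];
}

int main(void)
{
	struct pool p = { .size = 0, .used = 0 };

	for (int i = 0; i < 6; i++)
		printf("got PEB %d\n", get_peb(&p));
	return 0;
}
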
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+	int pnum;
+
+	if (pool->used == pool->size || !pool->size) {
+		/* We cannot update the fastmap here because this
+		 * function is called in atomic context.
+		 * Let's fail here and refill/update it as soon as possible. */
+#ifndef __UBOOT__
+		schedule_work(&ubi->fm_work);
+#else
+		/* In U-Boot we must call this directly */
+		ubi_update_fastmap(ubi);
+#endif
+		return NULL;
+	} else {
+		pnum = pool->pebs[pool->used++];
+		return ubi->lookuptbl[pnum];
+	}
+}
+#else
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_wl_entry *e;
+
+	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+	rb_erase(&e->u.rb, &ubi->free);
+
+	return e;
+}
+
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	int peb, err;
+
+	spin_lock(&ubi->wl_lock);
+	peb = __wl_get_peb(ubi);
+	spin_unlock(&ubi->wl_lock);
+
+	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
+				    ubi->peb_size - ubi->vid_hdr_aloffset);
+	if (err) {
+		ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
+		return err;
+	}
+
+	return peb;
+}
+#endif
+
+/**
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to remove
  *
- * This function returns PEB @pnum from the protection trees and returns zero
- * in case of success and %-ENODEV if the PEB was not found in the protection
- * trees.
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
  */
-static int prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
 {
-	struct rb_node *p;
-	struct ubi_wl_prot_entry *pe = NULL;
+	struct ubi_wl_entry *e;
 
-	p = ubi->prot.pnum.rb_node;
-	while (p) {
+	e = ubi->lookuptbl[pnum];
+	if (!e)
+		return -ENODEV;
 
-		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
+	if (self_check_in_pq(ubi, e))
+		return -ENODEV;
 
-		if (pnum == pe->e->pnum)
-			goto found;
-
-		if (pnum < pe->e->pnum)
-			p = p->rb_left;
-		else
-			p = p->rb_right;
-	}
-
-	return -ENODEV;
-
-found:
-	ubi_assert(pe->e->pnum == pnum);
-	rb_erase(&pe->rb_aec, &ubi->prot.aec);
-	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-	kfree(pe);
+	list_del(&e->u.list);
+	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
 	return 0;
 }
 
@@ -574,7 +729,8 @@
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+		      int torture)
 {
 	int err;
 	struct ubi_ec_hdr *ec_hdr;
@@ -582,8 +738,8 @@
 
 	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
 
-	err = paranoid_check_ec(ubi, e->pnum, e->ec);
-	if (err > 0)
+	err = self_check_ec(ubi, e->pnum, e->ec);
+	if (err)
 		return -EINVAL;
 
 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -626,48 +782,74 @@
 }
 
 /**
- * check_protection_over - check if it is time to stop protecting some
- * physical eraseblocks.
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
  * @ubi: UBI device description object
  *
- * This function is called after each erase operation, when the absolute erase
- * counter is incremented, to check if some physical eraseblock  have not to be
- * protected any longer. These physical eraseblocks are moved from the
- * protection trees to the used tree.
+ * This function is called after each erase operation and removes PEBs from the
+ * tail of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
  */
-static void check_protection_over(struct ubi_device *ubi)
+static void serve_prot_queue(struct ubi_device *ubi)
 {
-	struct ubi_wl_prot_entry *pe;
+	struct ubi_wl_entry *e, *tmp;
+	int count;
 
 	/*
	 * There may be several protected physical eraseblocks to remove,
 	 * process them all.
 	 */
-	while (1) {
-		spin_lock(&ubi->wl_lock);
-		if (!ubi->prot.aec.rb_node) {
+repeat:
+	count = 0;
+	spin_lock(&ubi->wl_lock);
+	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+		dbg_wl("PEB %d EC %d protection over, move to used tree",
+			e->pnum, e->ec);
+
+		list_del(&e->u.list);
+		wl_tree_add(e, &ubi->used);
+		if (count++ > 32) {
+			/*
+			 * Let's be nice and avoid holding the spinlock for
+			 * too long.
+			 */
 			spin_unlock(&ubi->wl_lock);
-			break;
+			cond_resched();
+			goto repeat;
 		}
-
-		pe = rb_entry(rb_first(&ubi->prot.aec),
-			      struct ubi_wl_prot_entry, rb_aec);
-
-		if (pe->abs_ec > ubi->abs_ec) {
-			spin_unlock(&ubi->wl_lock);
-			break;
-		}
-
-		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
-		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
-		rb_erase(&pe->rb_aec, &ubi->prot.aec);
-		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-		wl_tree_add(pe->e, &ubi->used);
-		spin_unlock(&ubi->wl_lock);
-
-		kfree(pe);
-		cond_resched();
 	}
+
+	ubi->pq_head += 1;
+	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+		ubi->pq_head = 0;
+	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
+	spin_unlock(&ubi->wl_lock);
+}
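
Together with prot_queue_add(), this gives each PEB exactly
%UBI_PROT_QUEUE_LEN erase cycles of protection. A minimal stand-alone model
demonstrates the aging; for brevity the queue length is shortened to 4 and
each slot holds one PEB instead of a list.

#include <stdio.h>

#define PROT_QUEUE_LEN 4	/* shortened for the example */

static int pq[PROT_QUEUE_LEN] = { -1, -1, -1, -1 };
static int pq_head;

/* Mirrors prot_queue_add(): the new entry goes to the tail,
 * i.e. the slot just behind the head. */
static void protect(int pnum)
{
	int tail = pq_head - 1;

	if (tail < 0)
		tail = PROT_QUEUE_LEN - 1;
	pq[tail] = pnum;
}

/* Mirrors serve_prot_queue(): each erase releases the head slot
 * and advances the head. */
static void erase_happened(void)
{
	if (pq[pq_head] >= 0) {
		printf("PEB %d protection over\n", pq[pq_head]);
		pq[pq_head] = -1;
	}
	if (++pq_head == PROT_QUEUE_LEN)
		pq_head = 0;
}

int main(void)
{
	protect(42);
	/* The PEB ages out only after PROT_QUEUE_LEN erase cycles. */
	for (int i = 0; i < PROT_QUEUE_LEN; i++)
		erase_happened();
	return 0;
}
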
+
+/**
+ * __schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list. Can only be used if @ubi->work_sem is already held in read mode!
+ */
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+	spin_lock(&ubi->wl_lock);
+	list_add_tail(&wrk->list, &ubi->works);
+	ubi_assert(ubi->works_count >= 0);
+	ubi->works_count += 1;
+#ifndef __UBOOT__
+	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
+		wake_up_process(ubi->bgt_thread);
+#else
+	/*
+	 * U-Boot special: We have no bgt_thread in U-Boot!
+	 * So just call do_work() here directly.
+	 */
+	do_work(ubi);
+#endif
+	spin_unlock(&ubi->wl_lock);
 }
 
 /**
@@ -675,42 +857,49 @@
  * @ubi: UBI device description object
  * @wrk: the work to schedule
  *
- * This function enqueues a work defined by @wrk to the tail of the pending
- * works list.
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
  */
 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
 {
-	spin_lock(&ubi->wl_lock);
-	list_add_tail(&wrk->list, &ubi->works);
-	ubi_assert(ubi->works_count >= 0);
-	ubi->works_count += 1;
-
-	/*
-	 * U-Boot special: We have no bgt_thread in U-Boot!
-	 * So just call do_work() here directly.
-	 */
-	do_work(ubi);
-
-	spin_unlock(&ubi->wl_lock);
+	down_read(&ubi->work_sem);
+	__schedule_ubi_work(ubi, wrk);
+	up_read(&ubi->work_sem);
 }
 
 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 			int cancel);
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+	return wrk->func == erase_worker;
+}
+#endif
+
 /**
  * schedule_erase - schedule an erase work.
  * @ubi: UBI device description object
  * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
  * @torture: if the physical eraseblock has to be tortured
  *
  * This function returns zero in case of success and a %-ENOMEM in case of
  * failure.
  */
 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
-			  int torture)
+			  int vol_id, int lnum, int torture)
 {
 	struct ubi_work *wl_wrk;
 
+	ubi_assert(e);
+	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
 	       e->pnum, e->ec, torture);
 
@@ -720,6 +909,8 @@
 
 	wl_wrk->func = &erase_worker;
 	wl_wrk->e = e;
+	wl_wrk->vol_id = vol_id;
+	wl_wrk->lnum = lnum;
 	wl_wrk->torture = torture;
 
 	schedule_ubi_work(ubi, wl_wrk);
@@ -727,6 +918,79 @@
 }
 
 /**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+			 int vol_id, int lnum, int torture)
+{
+	struct ubi_work *wl_wrk;
+
+	dbg_wl("sync erase of PEB %i", e->pnum);
+
+	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+	if (!wl_wrk)
+		return -ENOMEM;
+
+	wl_wrk->e = e;
+	wl_wrk->vol_id = vol_id;
+	wl_wrk->lnum = lnum;
+	wl_wrk->torture = torture;
+
+	return erase_worker(ubi, wl_wrk, 0);
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+		      int lnum, int torture)
+{
+	struct ubi_wl_entry *e;
+	int vol_id, pnum = fm_e->pnum;
+
+	dbg_wl("PEB %d", pnum);
+
+	ubi_assert(pnum >= 0);
+	ubi_assert(pnum < ubi->peb_count);
+
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+
+	/* This can happen if we recovered from a fastmap the very
+	 * first time and are now writing a new one. In this case the
+	 * wl system has never seen any PEB used by the original fastmap.
+	 */
+	if (!e) {
+		e = fm_e;
+		ubi_assert(e->ec >= 0);
+		ubi->lookuptbl[pnum] = e;
+	} else {
+		e->ec = fm_e->ec;
+		kfree(fm_e);
+	}
+
+	spin_unlock(&ubi->wl_lock);
+
+	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+	return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+#endif
+
+/**
  * wear_leveling_worker - wear-leveling worker function.
  * @ubi: UBI device description object
  * @wrk: the work object
@@ -739,13 +1003,15 @@
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, put = 0, scrubbing = 0, protect = 0;
-	struct ubi_wl_prot_entry *uninitialized_var(pe);
+	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+	int vol_id = -1, uninitialized_var(lnum);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	int anchor = wrk->anchor;
+#endif
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
 	kfree(wrk);
-
 	if (cancel)
 		return 0;
 
@@ -775,36 +1041,61 @@
 		goto out_cancel;
 	}
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/* Check whether we need to produce an anchor PEB */
+	if (!anchor)
+		anchor = !anchor_pebs_avalible(&ubi->free);
+
+	if (anchor) {
+		e1 = find_anchor_wl_entry(&ubi->used);
+		if (!e1)
+			goto out_cancel;
+		e2 = get_peb_for_wl(ubi);
+		if (!e2)
+			goto out_cancel;
+
+		self_check_in_wl_tree(ubi, e1, &ubi->used);
+		rb_erase(&e1->u.rb, &ubi->used);
+		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+	} else if (!ubi->scrub.rb_node) {
+#else
 	if (!ubi->scrub.rb_node) {
+#endif
 		/*
 		 * Now pick the least worn-out used physical eraseblock and a
 		 * highly worn-out free physical eraseblock. If the erase
 		 * counters differ much enough, start wear-leveling.
 		 */
-		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
-		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+		e2 = get_peb_for_wl(ubi);
+		if (!e2)
+			goto out_cancel;
 
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
 			       e1->ec, e2->ec);
+
+			/* Give the unused PEB back */
+			wl_tree_add(e2, &ubi->free);
 			goto out_cancel;
 		}
-		paranoid_check_in_wl_tree(e1, &ubi->used);
-		rb_erase(&e1->rb, &ubi->used);
+		self_check_in_wl_tree(ubi, e1, &ubi->used);
+		rb_erase(&e1->u.rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
 		/* Perform scrubbing */
 		scrubbing = 1;
-		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
-		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-		paranoid_check_in_wl_tree(e1, &ubi->scrub);
-		rb_erase(&e1->rb, &ubi->scrub);
+		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
+		e2 = get_peb_for_wl(ubi);
+		if (!e2)
+			goto out_cancel;
+
+		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
+		rb_erase(&e1->u.rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}
 
-	paranoid_check_in_wl_tree(e2, &ubi->free);
-	rb_erase(&e2->rb, &ubi->free);
 	ubi->move_from = e1;
 	ubi->move_to = e2;
 	spin_unlock(&ubi->wl_lock);
@@ -822,81 +1113,127 @@
 
 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
 	if (err && err != UBI_IO_BITFLIPS) {
-		if (err == UBI_IO_PEB_FREE) {
+		if (err == UBI_IO_FF) {
 			/*
 			 * We are trying to move PEB without a VID header. UBI
 			 * always write VID headers shortly after the PEB was
-			 * given, so we have a situation when it did not have
-			 * chance to write it down because it was preempted.
-			 * Just re-schedule the work, so that next time it will
-			 * likely have the VID header in place.
+			 * given, so we have a situation when it has not yet
+			 * had a chance to write it, because it was preempted.
+			 * So add this PEB to the protection queue for now,
+			 * because presumably more data will be written there
+			 * (including the missing VID header), and then we'll
+			 * move it.
 			 */
 			dbg_wl("PEB %d has no VID header", e1->pnum);
+			protect = 1;
+			goto out_not_moved;
+		} else if (err == UBI_IO_FF_BITFLIPS) {
+			/*
+			 * The same situation as %UBI_IO_FF, but bit-flips were
+			 * detected. It is better to schedule this PEB for
+			 * scrubbing.
+			 */
+			dbg_wl("PEB %d has no VID header but has bit-flips",
+			       e1->pnum);
+			scrubbing = 1;
 			goto out_not_moved;
 		}
 
 		ubi_err("error %d while reading VID header from PEB %d",
 			err, e1->pnum);
-		if (err > 0)
-			err = -EIO;
 		goto out_error;
 	}
 
+	vol_id = be32_to_cpu(vid_hdr->vol_id);
+	lnum = be32_to_cpu(vid_hdr->lnum);
+
 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
 	if (err) {
+		if (err == MOVE_CANCEL_RACE) {
+			/*
+			 * The LEB has not been moved because the volume is
+			 * being deleted or the PEB has been put meanwhile. We
+			 * should prevent this PEB from being selected for
+			 * wear-leveling movement again, so put it to the
+			 * protection queue.
+			 */
+			protect = 1;
+			goto out_not_moved;
+		}
+		if (err == MOVE_RETRY) {
+			scrubbing = 1;
+			goto out_not_moved;
+		}
+		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+		    err == MOVE_TARGET_RD_ERR) {
+			/*
+			 * Target PEB had bit-flips or write error - torture it.
+			 */
+			torture = 1;
+			goto out_not_moved;
+		}
+
+		if (err == MOVE_SOURCE_RD_ERR) {
+			/*
+			 * An error happened while reading the source PEB. Do
+			 * not switch to R/O mode in this case, and give the
+			 * upper layers a possibility to recover from this,
+			 * e.g. by unmapping corresponding LEB. Instead, just
+			 * put this PEB to the @ubi->erroneous list to prevent
+			 * UBI from trying to move it over and over again.
+			 */
+			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+				ubi_err("too many erroneous eraseblocks (%d)",
+					ubi->erroneous_peb_count);
+				goto out_error;
+			}
+			erroneous = 1;
+			goto out_not_moved;
+		}
 
 		if (err < 0)
 			goto out_error;
-		if (err == 1)
-			goto out_not_moved;
 
-		/*
-		 * For some reason the LEB was not moved - it might be because
-		 * the volume is being deleted. We should prevent this PEB from
-		 * being selected for wear-levelling movement for some "time",
-		 * so put it to the protection tree.
-		 */
-
-		dbg_wl("cancelled moving PEB %d", e1->pnum);
-		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
-		if (!pe) {
-			err = -ENOMEM;
-			goto out_error;
-		}
-
-		protect = 1;
+		ubi_assert(0);
 	}
 
+	/* The PEB has been successfully moved */
+	if (scrubbing)
+		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+			e1->pnum, vol_id, lnum, e2->pnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
+
 	spin_lock(&ubi->wl_lock);
-	if (protect)
-		prot_tree_add(ubi, e1, pe, protect);
-	if (!ubi->move_to_put)
+	if (!ubi->move_to_put) {
 		wl_tree_add(e2, &ubi->used);
-	else
-		put = 1;
+		e2 = NULL;
+	}
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	if (put) {
+	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+	if (err) {
+		kmem_cache_free(ubi_wl_entry_slab, e1);
+		if (e2)
+			kmem_cache_free(ubi_wl_entry_slab, e2);
+		goto out_ro;
+	}
+
+	if (e2) {
 		/*
 		 * Well, the target PEB was put meanwhile, schedule it for
 		 * erasure.
 		 */
-		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
-		err = schedule_erase(ubi, e2, 0);
-		if (err)
-			goto out_error;
+		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+		       e2->pnum, vol_id, lnum);
+		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+		if (err) {
+			kmem_cache_free(ubi_wl_entry_slab, e2);
+			goto out_ro;
+		}
 	}
 
-	if (!protect) {
-		err = schedule_erase(ubi, e1, 0);
-		if (err)
-			goto out_error;
-	}
-
-
 	dbg_wl("done");
 	mutex_unlock(&ubi->move_mutex);
 	return 0;
@@ -904,42 +1241,60 @@
 	/*
	 * For some reason the LEB was not moved; it might be an error, it might be
 	 * something else. @e1 was not changed, so return it back. @e2 might
-	 * be changed, schedule it for erasure.
+	 * have been changed, schedule it for erasure.
 	 */
 out_not_moved:
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	if (vol_id != -1)
+		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
+		       e1->pnum, vol_id, lnum, e2->pnum, err);
+	else
+		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+		       e1->pnum, e2->pnum, err);
 	spin_lock(&ubi->wl_lock);
-	if (scrubbing)
+	if (protect)
+		prot_queue_add(ubi, e1);
+	else if (erroneous) {
+		wl_tree_add(e1, &ubi->erroneous);
+		ubi->erroneous_peb_count += 1;
+	} else if (scrubbing)
 		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
+	ubi_assert(!ubi->move_to_put);
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_to_put = ubi->wl_scheduled = 0;
+	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	err = schedule_erase(ubi, e2, 0);
-	if (err)
-		goto out_error;
-
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+	if (err) {
+		kmem_cache_free(ubi_wl_entry_slab, e2);
+		goto out_ro;
+	}
 	mutex_unlock(&ubi->move_mutex);
 	return 0;
 
 out_error:
-	ubi_err("error %d while moving PEB %d to PEB %d",
-		err, e1->pnum, e2->pnum);
-
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	if (vol_id != -1)
+		ubi_err("error %d while moving PEB %d to PEB %d",
+			err, e1->pnum, e2->pnum);
+	else
+		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+			err, e1->pnum, vol_id, lnum, e2->pnum);
 	spin_lock(&ubi->wl_lock);
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
+	ubi_free_vid_hdr(ubi, vid_hdr);
 	kmem_cache_free(ubi_wl_entry_slab, e1);
 	kmem_cache_free(ubi_wl_entry_slab, e2);
-	ubi_ro_mode(ubi);
 
+out_ro:
+	ubi_ro_mode(ubi);
 	mutex_unlock(&ubi->move_mutex);
-	return err;
+	ubi_assert(err != 0);
+	return err < 0 ? err : -EIO;
 
 out_cancel:
 	ubi->wl_scheduled = 0;
@@ -952,12 +1307,13 @@
 /**
  * ensure_wear_leveling - schedule wear-leveling if it is needed.
  * @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from a UBI worker
  *
  * This function checks if it is time to start wear-leveling and schedules it
  * if yes. This function returns zero in case of success and a negative error
  * code in case of failure.
  */
-static int ensure_wear_leveling(struct ubi_device *ubi)
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 {
 	int err = 0;
 	struct ubi_wl_entry *e1;
@@ -981,11 +1337,11 @@
 		/*
 		 * We schedule wear-leveling only if the difference between the
 		 * lowest erase counter of used physical eraseblocks and a high
-		 * erase counter of free physical eraseblocks is greater then
+		 * erase counter of free physical eraseblocks is greater than
 		 * %UBI_WL_THRESHOLD.
 		 */
-		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
-		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
 
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
 			goto out_unlock;
@@ -1002,8 +1358,12 @@
 		goto out_cancel;
 	}
 
+	wrk->anchor = 0;
 	wrk->func = &wear_leveling_worker;
-	schedule_ubi_work(ubi, wrk);
+	if (nested)
+		__schedule_ubi_work(ubi, wrk);
+	else
+		schedule_ubi_work(ubi, wrk);
 	return err;
 
 out_cancel:
@@ -1014,6 +1374,38 @@
 	return err;
 }
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+	struct ubi_work *wrk;
+
+	spin_lock(&ubi->wl_lock);
+	if (ubi->wl_scheduled) {
+		spin_unlock(&ubi->wl_lock);
+		return 0;
+	}
+	ubi->wl_scheduled = 1;
+	spin_unlock(&ubi->wl_lock);
+
+	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+	if (!wrk) {
+		spin_lock(&ubi->wl_lock);
+		ubi->wl_scheduled = 0;
+		spin_unlock(&ubi->wl_lock);
+		return -ENOMEM;
+	}
+
+	wrk->anchor = 1;
+	wrk->func = &wear_leveling_worker;
+	schedule_ubi_work(ubi, wrk);
+	return 0;
+}
+#endif
+
 /**
  * erase_worker - physical eraseblock erase worker function.
  * @ubi: UBI device description object
@@ -1029,7 +1421,10 @@
 			int cancel)
 {
 	struct ubi_wl_entry *e = wl_wrk->e;
-	int pnum = e->pnum, err, need;
+	int pnum = e->pnum;
+	int vol_id = wl_wrk->vol_id;
+	int lnum = wl_wrk->lnum;
+	int err, available_consumed = 0;
 
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1038,7 +1433,10 @@
 		return 0;
 	}
 
-	dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+	dbg_wl("erase PEB %d EC %d LEB %d:%d",
+	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
 
 	err = sync_erase(ubi, e, wl_wrk->torture);
 	if (!err) {
@@ -1046,44 +1444,45 @@
 		kfree(wl_wrk);
 
 		spin_lock(&ubi->wl_lock);
-		ubi->abs_ec += 1;
 		wl_tree_add(e, &ubi->free);
+		ubi->free_count++;
 		spin_unlock(&ubi->wl_lock);
 
 		/*
-		 * One more erase operation has happened, take care about protected
-		 * physical eraseblocks.
+		 * One more erase operation has happened; take care of
+		 * protected physical eraseblocks.
 		 */
-		check_protection_over(ubi);
+		serve_prot_queue(ubi);
 
 		/* And take care about wear-leveling */
-		err = ensure_wear_leveling(ubi);
+		err = ensure_wear_leveling(ubi, 1);
 		return err;
 	}
 
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
 		int err1;
 
 		/* Re-schedule the LEB for erasure */
-		err1 = schedule_erase(ubi, e, 0);
+		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
 		if (err1) {
 			err = err1;
 			goto out_ro;
 		}
 		return err;
-	} else if (err != -EIO) {
+	}
+
+	kmem_cache_free(ubi_wl_entry_slab, e);
+	if (err != -EIO)
 		/*
 		 * If this is not %-EIO, we have no idea what to do. Scheduling
 		 * this physical eraseblock for erasure again would cause
-		 * errors again and again. Well, lets switch to RO mode.
+		 * errors again and again. Well, let's switch to R/O mode.
 		 */
 		goto out_ro;
-	}
 
 	/* It is %-EIO, the PEB went bad */
 
@@ -1093,48 +1492,62 @@
 	}
 
 	spin_lock(&ubi->volumes_lock);
-	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
-	if (need > 0) {
-		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
-		ubi->avail_pebs -= need;
-		ubi->rsvd_pebs += need;
-		ubi->beb_rsvd_pebs += need;
-		if (need > 0)
-			ubi_msg("reserve more %d PEBs", need);
-	}
-
 	if (ubi->beb_rsvd_pebs == 0) {
-		spin_unlock(&ubi->volumes_lock);
-		ubi_err("no reserved physical eraseblocks");
-		goto out_ro;
+		if (ubi->avail_pebs == 0) {
+			spin_unlock(&ubi->volumes_lock);
+			ubi_err("no reserved/available physical eraseblocks");
+			goto out_ro;
+		}
+		ubi->avail_pebs -= 1;
+		available_consumed = 1;
 	}
-
 	spin_unlock(&ubi->volumes_lock);
-	ubi_msg("mark PEB %d as bad", pnum);
 
+	ubi_msg("mark PEB %d as bad", pnum);
 	err = ubi_io_mark_bad(ubi, pnum);
 	if (err)
 		goto out_ro;
 
 	spin_lock(&ubi->volumes_lock);
-	ubi->beb_rsvd_pebs -= 1;
+	if (ubi->beb_rsvd_pebs > 0) {
+		if (available_consumed) {
+			/*
+			 * The amount of reserved PEBs increased since we last
+			 * checked.
+			 */
+			ubi->avail_pebs += 1;
+			available_consumed = 0;
+		}
+		ubi->beb_rsvd_pebs -= 1;
+	}
 	ubi->bad_peb_count += 1;
 	ubi->good_peb_count -= 1;
 	ubi_calculate_reserved(ubi);
-	if (ubi->beb_rsvd_pebs == 0)
-		ubi_warn("last PEB from the reserved pool was used");
+	if (available_consumed)
+		ubi_warn("no PEBs in the reserved pool, used an available PEB");
+	else if (ubi->beb_rsvd_pebs)
+		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
+	else
+		ubi_warn("last PEB from the reserve was used");
 	spin_unlock(&ubi->volumes_lock);
 
 	return err;
 
 out_ro:
+	if (available_consumed) {
+		spin_lock(&ubi->volumes_lock);
+		ubi->avail_pebs += 1;
+		spin_unlock(&ubi->volumes_lock);
+	}
 	ubi_ro_mode(ubi);
 	return err;
 }
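
The available_consumed logic above replaces the old "top up the reserve"
strategy: when a PEB must be marked bad while the bad-PEB reserve is empty,
one block is borrowed from the user-visible pool, and it is repaid if the
reserve was refilled in the meantime. A minimal sketch of that accounting;
the struct is illustrative, only the two counters mirror ubi_device fields:

#include <stdio.h>

struct pools {
        int beb_rsvd_pebs;      /* PEBs reserved for bad-block handling */
        int avail_pebs;         /* PEBs still available to the user */
};

/* Returns 0 on success, -1 if no PEB can be spent on the bad block. */
static int consume_for_bad_peb(struct pools *p)
{
        int available_consumed = 0;

        if (p->beb_rsvd_pebs == 0) {
                if (p->avail_pebs == 0)
                        return -1;              /* nothing left at all */
                p->avail_pebs -= 1;             /* borrow from the user pool */
                available_consumed = 1;
        }

        /* ... the PEB would be marked bad here ... */

        if (p->beb_rsvd_pebs > 0) {
                if (available_consumed)
                        p->avail_pebs += 1;     /* reserve grew meanwhile: repay */
                p->beb_rsvd_pebs -= 1;
        }
        return 0;
}

int main(void)
{
        struct pools p = { .beb_rsvd_pebs = 0, .avail_pebs = 2 };

        consume_for_bad_peb(&p);
        printf("rsvd=%d avail=%d\n", p.beb_rsvd_pebs, p.avail_pebs); /* rsvd=0 avail=1 */
        return 0;
}
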
 
 /**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
+ * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
  * @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
  * @pnum: physical eraseblock to return
  * @torture: if this physical eraseblock has to be tortured
  *
@@ -1143,7 +1556,8 @@
  * occurred to this @pnum and it has to be tested. This function returns zero
  * in case of success, and a negative error code in case of failure.
  */
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+		   int pnum, int torture)
 {
 	int err;
 	struct ubi_wl_entry *e;
@@ -1172,11 +1586,11 @@
 		/*
 		 * User is putting the physical eraseblock which was selected
 		 * as the target the data is moved to. It may happen if the EBA
-		 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
-		 * the WL unit has not put the PEB to the "used" tree yet, but
-		 * it is about to do this. So we just set a flag which will
-		 * tell the WL worker that the PEB is not needed anymore and
-		 * should be scheduled for erasure.
+		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
+		 * but the WL sub-system has not put the PEB to the "used" tree
+		 * yet, but it is about to do this. So we just set a flag which
+		 * will tell the WL worker that the PEB is not needed anymore
+		 * and should be scheduled for erasure.
 		 */
 		dbg_wl("PEB %d is the target of data moving", pnum);
 		ubi_assert(!ubi->move_to_put);
@@ -1185,13 +1599,20 @@
 		return 0;
 	} else {
 		if (in_wl_tree(e, &ubi->used)) {
-			paranoid_check_in_wl_tree(e, &ubi->used);
-			rb_erase(&e->rb, &ubi->used);
+			self_check_in_wl_tree(ubi, e, &ubi->used);
+			rb_erase(&e->u.rb, &ubi->used);
 		} else if (in_wl_tree(e, &ubi->scrub)) {
-			paranoid_check_in_wl_tree(e, &ubi->scrub);
-			rb_erase(&e->rb, &ubi->scrub);
+			self_check_in_wl_tree(ubi, e, &ubi->scrub);
+			rb_erase(&e->u.rb, &ubi->scrub);
+		} else if (in_wl_tree(e, &ubi->erroneous)) {
+			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
+			rb_erase(&e->u.rb, &ubi->erroneous);
+			ubi->erroneous_peb_count -= 1;
+			ubi_assert(ubi->erroneous_peb_count >= 0);
+			/* Erroneous PEBs should be tortured */
+			torture = 1;
 		} else {
-			err = prot_tree_del(ubi, e->pnum);
+			err = prot_queue_del(ubi, e->pnum);
 			if (err) {
 				ubi_err("PEB %d not found", pnum);
 				ubi_ro_mode(ubi);
@@ -1202,7 +1623,7 @@
 	}
 	spin_unlock(&ubi->wl_lock);
 
-	err = schedule_erase(ubi, e, torture);
+	err = schedule_erase(ubi, e, vol_id, lnum, torture);
 	if (err) {
 		spin_lock(&ubi->wl_lock);
 		wl_tree_add(e, &ubi->used);
@@ -1231,7 +1652,8 @@
 retry:
 	spin_lock(&ubi->wl_lock);
 	e = ubi->lookuptbl[pnum];
-	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
+	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+				   in_wl_tree(e, &ubi->erroneous)) {
 		spin_unlock(&ubi->wl_lock);
 		return 0;
 	}
@@ -1250,12 +1672,12 @@
 	}
 
 	if (in_wl_tree(e, &ubi->used)) {
-		paranoid_check_in_wl_tree(e, &ubi->used);
-		rb_erase(&e->rb, &ubi->used);
+		self_check_in_wl_tree(ubi, e, &ubi->used);
+		rb_erase(&e->u.rb, &ubi->used);
 	} else {
 		int err;
 
-		err = prot_tree_del(ubi, e->pnum);
+		err = prot_queue_del(ubi, e->pnum);
 		if (err) {
 			ubi_err("PEB %d not found", pnum);
 			ubi_ro_mode(ubi);
@@ -1271,29 +1693,60 @@
 	 * Technically scrubbing is the same as wear-leveling, so it is done
 	 * by the WL worker.
 	 */
-	return ensure_wear_leveling(ubi);
+	return ensure_wear_leveling(ubi, 0);
 }
 
 /**
  * ubi_wl_flush - flush all pending works.
  * @ubi: UBI device description object
+ * @vol_id: the volume ID to flush for
+ * @lnum: the logical eraseblock number to flush for
  *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function executes all pending works for a particular volume ID /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
  */
-int ubi_wl_flush(struct ubi_device *ubi)
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	int err;
+	int err = 0;
+	int found = 1;
 
 	/*
-	 * Erase while the pending works queue is not empty, but not more then
+	 * Erase while the pending works queue is not empty, but not more than
 	 * the number of currently pending works.
 	 */
-	dbg_wl("flush (%d pending works)", ubi->works_count);
-	while (ubi->works_count) {
-		err = do_work(ubi);
-		if (err)
-			return err;
+	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+	       vol_id, lnum, ubi->works_count);
+
+	while (found) {
+		struct ubi_work *wrk;
+		found = 0;
+
+		down_read(&ubi->work_sem);
+		spin_lock(&ubi->wl_lock);
+		list_for_each_entry(wrk, &ubi->works, list) {
+			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
+				list_del(&wrk->list);
+				ubi->works_count -= 1;
+				ubi_assert(ubi->works_count >= 0);
+				spin_unlock(&ubi->wl_lock);
+
+				err = wrk->func(ubi, wrk, 0);
+				if (err) {
+					up_read(&ubi->work_sem);
+					return err;
+				}
+
+				spin_lock(&ubi->wl_lock);
+				found = 1;
+				break;
+			}
+		}
+		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 	}
 
 	/*
@@ -1303,18 +1756,7 @@
 	down_write(&ubi->work_sem);
 	up_write(&ubi->work_sem);
 
-	/*
-	 * And in case last was the WL worker and it cancelled the LEB
-	 * movement, flush again.
-	 */
-	while (ubi->works_count) {
-		dbg_wl("flush more (%d pending works)", ubi->works_count);
-		err = do_work(ubi);
-		if (err)
-			return err;
-	}
-
-	return 0;
+	return err;
 }
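
ubi_wl_flush() is now selective: rather than draining the whole queue, it
executes only the pending works whose (vol_id, lnum) pair matches, with
%UBI_ALL acting as a wildcard on either field. A minimal sketch of the match
predicate used in the loop above; UBI_ALL is assumed to be -1 here:

#include <stdio.h>

#define UBI_ALL -1                      /* assumed wildcard value */

struct work { int vol_id; int lnum; };

static int work_matches(const struct work *w, int vol_id, int lnum)
{
        return (vol_id == UBI_ALL || w->vol_id == vol_id) &&
               (lnum == UBI_ALL || w->lnum == lnum);
}

int main(void)
{
        struct work w = { .vol_id = 2, .lnum = 7 };

        printf("%d\n", work_matches(&w, UBI_ALL, UBI_ALL));  /* 1: flush everything */
        printf("%d\n", work_matches(&w, 2, UBI_ALL));        /* 1: whole volume 2 */
        printf("%d\n", work_matches(&w, 3, 7));              /* 0: other volume */
        return 0;
}
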
 
 /**
@@ -1333,11 +1775,11 @@
 		else if (rb->rb_right)
 			rb = rb->rb_right;
 		else {
-			e = rb_entry(rb, struct ubi_wl_entry, rb);
+			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
 
 			rb = rb_parent(rb);
 			if (rb) {
-				if (rb->rb_left == &e->rb)
+				if (rb->rb_left == &e->u.rb)
 					rb->rb_left = NULL;
 				else
 					rb->rb_right = NULL;
@@ -1372,7 +1814,7 @@
 
 		spin_lock(&ubi->wl_lock);
 		if (list_empty(&ubi->works) || ubi->ro_mode ||
-			       !ubi->thread_enabled) {
+		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock(&ubi->wl_lock);
 			schedule();
@@ -1392,7 +1834,8 @@
 				ubi_msg("%s: %d consecutive failures",
 					ubi->bgt_name, WL_MAX_FAILURES);
 				ubi_ro_mode(ubi);
-				break;
+				ubi->thread_enabled = 0;
+				continue;
 			}
 		} else
 			failures = 0;
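
Parking the thread (thread_enabled = 0; continue) instead of breaking out of
the loop keeps the background thread alive after the device has been forced
read-only, so it can still be woken and stopped cleanly later. A minimal
sketch of the consecutive-failure policy; the WL_MAX_FAILURES value is an
assumption for the sake of the example:

#include <stdio.h>

#define WL_MAX_FAILURES 32              /* assumed limit */

int main(void)
{
        int failures = 0, thread_enabled = 1;
        int work;

        for (work = 0; work < 100 && thread_enabled; work++) {
                int err = -1;           /* pretend every work item fails */

                if (err && ++failures > WL_MAX_FAILURES) {
                        /* ubi_ro_mode() would be called here */
                        thread_enabled = 0;     /* park; do not exit */
                }
        }
        printf("parked after %d consecutive failures\n", failures);
        return 0;
}
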
@@ -1422,30 +1865,32 @@
 }
 
 /**
- * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
- * information.
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * This function returns zero in case of success, and a negative error code in
  * case of failure.
  */
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
-	int err;
+	int err, i, reserved_pebs, found_pebs = 0;
 	struct rb_node *rb1, *rb2;
-	struct ubi_scan_volume *sv;
-	struct ubi_scan_leb *seb, *tmp;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb, *tmp;
 	struct ubi_wl_entry *e;
 
-
-	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
-	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
+	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
 	mutex_init(&ubi->move_mutex);
 	init_rwsem(&ubi->work_sem);
-	ubi->max_ec = si->max_ec;
+	ubi->max_ec = ai->max_ec;
 	INIT_LIST_HEAD(&ubi->works);
+#ifndef __UBOOT__
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+#endif
+#endif
 
 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
 
@@ -1454,64 +1899,63 @@
 	if (!ubi->lookuptbl)
 		return err;
 
-	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+		INIT_LIST_HEAD(&ubi->pq[i]);
+	ubi->pq_head = 0;
+
+	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
 		cond_resched();
 
 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
-		e->pnum = seb->pnum;
-		e->ec = seb->ec;
+		e->pnum = aeb->pnum;
+		e->ec = aeb->ec;
+		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
 		ubi->lookuptbl[e->pnum] = e;
-		if (schedule_erase(ubi, e, 0)) {
+		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
 			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
+
+		found_pebs++;
 	}
 
-	list_for_each_entry(seb, &si->free, u.list) {
+	ubi->free_count = 0;
+	list_for_each_entry(aeb, &ai->free, u.list) {
 		cond_resched();
 
 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
-		e->pnum = seb->pnum;
-		e->ec = seb->ec;
+		e->pnum = aeb->pnum;
+		e->ec = aeb->ec;
 		ubi_assert(e->ec >= 0);
+		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
 		wl_tree_add(e, &ubi->free);
+		ubi->free_count++;
+
 		ubi->lookuptbl[e->pnum] = e;
+
+		found_pebs++;
 	}
 
-	list_for_each_entry(seb, &si->corr, u.list) {
-		cond_resched();
-
-		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-		if (!e)
-			goto out_free;
-
-		e->pnum = seb->pnum;
-		e->ec = seb->ec;
-		ubi->lookuptbl[e->pnum] = e;
-		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(ubi_wl_entry_slab, e);
-			goto out_free;
-		}
-	}
-
-	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
 			cond_resched();
 
 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
 
-			e->pnum = seb->pnum;
-			e->ec = seb->ec;
+			e->pnum = aeb->pnum;
+			e->ec = aeb->ec;
 			ubi->lookuptbl[e->pnum] = e;
-			if (!seb->scrub) {
+
+			if (!aeb->scrub) {
 				dbg_wl("add PEB %d EC %d to the used tree",
 				       e->pnum, e->ec);
 				wl_tree_add(e, &ubi->used);
@@ -1520,20 +1964,38 @@
 				       e->pnum, e->ec);
 				wl_tree_add(e, &ubi->scrub);
 			}
+
+			found_pebs++;
 		}
 	}
 
-	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
+	dbg_wl("found %i PEBs", found_pebs);
+
+	if (ubi->fm)
+		ubi_assert(ubi->good_peb_count ==
+			   found_pebs + ubi->fm->used_blocks);
+	else
+		ubi_assert(ubi->good_peb_count == found_pebs);
+
+	reserved_pebs = WL_RESERVED_PEBS;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/* Reserve enough LEBs to store two fastmaps. */
+	reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
+#endif
+
+	if (ubi->avail_pebs < reserved_pebs) {
 		ubi_err("no enough physical eraseblocks (%d, need %d)",
-			ubi->avail_pebs, WL_RESERVED_PEBS);
-		err = -ENOSPC;
+			ubi->avail_pebs, reserved_pebs);
+		if (ubi->corr_peb_count)
+			ubi_err("%d PEBs are corrupted and not used",
+				ubi->corr_peb_count);
 		goto out_free;
 	}
-	ubi->avail_pebs -= WL_RESERVED_PEBS;
-	ubi->rsvd_pebs += WL_RESERVED_PEBS;
+	ubi->avail_pebs -= reserved_pebs;
+	ubi->rsvd_pebs += reserved_pebs;
 
 	/* Schedule wear-leveling if needed */
-	err = ensure_wear_leveling(ubi);
+	err = ensure_wear_leveling(ubi, 0);
 	if (err)
 		goto out_free;
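
With fastmap enabled, the wear-leveling reserve grows by enough PEBs to store
two complete fastmap copies on top of the baseline WL_RESERVED_PEBS, which is
exactly what the reserved_pebs computation above does. A minimal sketch of
that calculation; the geometry numbers are illustrative:

#include <stdio.h>

#define WL_RESERVED_PEBS 1              /* baseline WL reserve */

static int wl_reserved(int fm_size, int leb_size, int fastmap_enabled)
{
        int reserved = WL_RESERVED_PEBS;

        if (fastmap_enabled)
                reserved += (fm_size / leb_size) * 2;   /* two fastmap copies */
        return reserved;
}

int main(void)
{
        /* e.g. a 256 KiB fastmap on 128 KiB LEBs needs 2 PEBs per copy */
        printf("%d\n", wl_reserved(256 * 1024, 128 * 1024, 1));  /* 5 */
        return 0;
}
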
 
@@ -1549,72 +2011,57 @@
 }
 
 /**
- * protection_trees_destroy - destroy the protection RB-trees.
+ * protection_queue_destroy - destroy the protection queue.
  * @ubi: UBI device description object
  */
-static void protection_trees_destroy(struct ubi_device *ubi)
+static void protection_queue_destroy(struct ubi_device *ubi)
 {
-	struct rb_node *rb;
-	struct ubi_wl_prot_entry *pe;
+	int i;
+	struct ubi_wl_entry *e, *tmp;
 
-	rb = ubi->prot.aec.rb_node;
-	while (rb) {
-		if (rb->rb_left)
-			rb = rb->rb_left;
-		else if (rb->rb_right)
-			rb = rb->rb_right;
-		else {
-			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
-
-			rb = rb_parent(rb);
-			if (rb) {
-				if (rb->rb_left == &pe->rb_aec)
-					rb->rb_left = NULL;
-				else
-					rb->rb_right = NULL;
-			}
-
-			kmem_cache_free(ubi_wl_entry_slab, pe->e);
-			kfree(pe);
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+			list_del(&e->u.list);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
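
The protection queue that replaced the old protection RB-trees is an array of
UBI_PROT_QUEUE_LEN list heads used as a rotating wheel: a PEB placed in it
stays shielded from wear-leveling for that many erase cycles, and tearing the
queue down is a plain walk over every slot, exactly as
protection_queue_destroy() does. A minimal standalone sketch of the layout
and teardown, with the kernel lists simplified to singly linked nodes:

#include <stdio.h>
#include <stdlib.h>

#define UBI_PROT_QUEUE_LEN 10           /* assumed queue depth */

struct pq_entry { int pnum; struct pq_entry *next; };

static struct pq_entry *pq[UBI_PROT_QUEUE_LEN];

static void pq_destroy(void)
{
        int i;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
                while (pq[i]) {
                        struct pq_entry *e = pq[i];

                        pq[i] = e->next;
                        free(e);        /* kmem_cache_free() in the driver */
                }
        }
}

int main(void)
{
        struct pq_entry *e = malloc(sizeof(*e));

        e->pnum = 42;
        e->next = NULL;
        pq[3] = e;                      /* protect PEB 42 for a few erases */
        printf("protecting PEB %d\n", pq[3]->pnum);
        pq_destroy();
        return 0;
}
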
 
 /**
- * ubi_wl_close - close the wear-leveling unit.
+ * ubi_wl_close - close the wear-leveling sub-system.
  * @ubi: UBI device description object
  */
 void ubi_wl_close(struct ubi_device *ubi)
 {
-	dbg_wl("close the UBI wear-leveling unit");
-
+	dbg_wl("close the WL sub-system");
 	cancel_pending(ubi);
-	protection_trees_destroy(ubi);
+	protection_queue_destroy(ubi);
 	tree_destroy(&ubi->used);
+	tree_destroy(&ubi->erroneous);
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
 /**
- * paranoid_check_ec - make sure that the erase counter of a physical eraseblock
- * is correct.
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  * @ec: the erase counter to check
  *
  * This function returns zero if the erase counter of physical eraseblock @pnum
- * is equivalent to @ec, %1 if not, and a negative error code if an error
+ * is equivalent to @ec, and a negative error code if not or if an error
  * occurred.
  */
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
 {
 	int err;
 	long long read_ec;
 	struct ubi_ec_hdr *ec_hdr;
 
+	if (!ubi_dbg_chk_gen(ubi))
+		return 0;
+
 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
 	if (!ec_hdr)
 		return -ENOMEM;
@@ -1627,10 +2074,10 @@
 	}
 
 	read_ec = be64_to_cpu(ec_hdr->ec);
-	if (ec != read_ec) {
-		ubi_err("paranoid check failed for PEB %d", pnum);
+	if (ec != read_ec && read_ec - ec > 1) {
+		ubi_err("self-check failed for PEB %d", pnum);
 		ubi_err("read EC is %lld, should be %d", read_ec, ec);
-		ubi_dbg_dump_stack();
+		dump_stack();
 		err = 1;
 	} else
 		err = 0;
@@ -1641,24 +2088,53 @@
 }
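
The relaxed condition above (read_ec - ec > 1) tolerates a cached erase
counter that lags the on-flash value by exactly one, which can happen when an
erase completes after the in-memory copy was taken; only larger drift is
reported. A minimal sketch of the check:

#include <stdio.h>

static int ec_mismatch(long long read_ec, int cached_ec)
{
        return read_ec != cached_ec && read_ec - cached_ec > 1;
}

int main(void)
{
        printf("%d\n", ec_mismatch(101, 100));  /* 0: tolerated drift */
        printf("%d\n", ec_mismatch(105, 100));  /* 1: self-check failure */
        return 0;
}
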
 
 /**
- * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
- * in a WL RB-tree.
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
  * @e: the wear-leveling entry to check
  * @root: the root of the tree
  *
- * This function returns zero if @e is in the @root RB-tree and %1 if it
+ * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
  * is not.
  */
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
-				     struct rb_root *root)
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+				 struct ubi_wl_entry *e, struct rb_root *root)
 {
+	if (!ubi_dbg_chk_gen(ubi))
+		return 0;
+
 	if (in_wl_tree(e, root))
 		return 0;
 
-	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+	ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
 		e->pnum, e->ec, root);
-	ubi_dbg_dump_stack();
-	return 1;
+	dump_stack();
+	return -EINVAL;
 }
 
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+/**
+ * self_check_in_pq - check if wear-leveling entry is in the protection
+ *                    queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
+ */
+static int self_check_in_pq(const struct ubi_device *ubi,
+			    struct ubi_wl_entry *e)
+{
+	struct ubi_wl_entry *p;
+	int i;
+
+	if (!ubi_dbg_chk_gen(ubi))
+		return 0;
+
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+		list_for_each_entry(p, &ubi->pq[i], u.list)
+			if (p == e)
+				return 0;
+
+	ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
+		e->pnum, e->ec);
+	dump_stack();
+	return -EINVAL;
+}