/*
 * Simple MTD partitioning layer
 *
 * (C) 2000 Nicolas Pitre <nico@cam.org>
 *
 * This code is GPL
 *
 * 02-21-2002	Thomas Gleixner <gleixner@autronix.de>
 *		added support for read_oob, write_oob
 */

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/compat.h>

/* Our partition linked list */
struct list_head mtd_partitions;

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	int index;
	struct list_head list;
	int registered;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.  The cast is valid because
 * the embedded mtd field is the first member of struct mtd_part.
 */
#define PART(x)  ((struct mtd_part *)(x))


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = mtd_read(part->master, from + part->offset, len, retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;
	res = mtd_read_oob(part->master, from + part->offset, ops);

	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_write(part->master, to + part->offset, len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return mtd_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_lock_user_prot_reg(part->master, from, len);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = mtd_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_unlock(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	mtd_sync(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return mtd_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = mtd_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;

	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			list_del(&slave->list);
			if (slave->registered)
				del_mtd_device(&slave->mtd);
			kfree(slave);
		}

	return 0;
}

static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		uint64_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
	slave->index = partno;

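	/*
	 * Resolve the "relative" offset requests: MTDPART_OFS_APPEND places
	 * this partition directly after the previous one, while
	 * MTDPART_OFS_NXTBLK does the same but rounds the start up to the
	 * next erase block boundary.
	 */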
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: 0x%012llx -> 0x%012llx\n",
			      partno, (unsigned long long)cur_offset,
			      (unsigned long long)slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n",
	      (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->mtd.size),
	      slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
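	/*
	 * If the master can report bad blocks, count the ones that fall
	 * inside this partition so the slave's ecc_stats start out accurate.
	 */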
	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	if (part->mtdp) {
		/* store the object pointer (caller may or may not register it) */
		*part->mtdp = &slave->mtd;
		slave->registered = 0;
	} else {
		/* register our partition */
		add_mtd_device(&slave->mtd);
		slave->registered = 1;
	}
	return slave;
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	/*
	 * Need to init the list here, since LIST_INIT() does not
	 * work on platforms where relocation has problems (like MIPS
	 * & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
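
/*
 * Example usage (a hypothetical sketch, not part of the original driver):
 * a board or flash driver typically describes its layout with an array of
 * struct mtd_partition and hands it to add_mtd_partitions() together with
 * the probed master device.  The partition names and sizes below are made
 * up purely for illustration.
 *
 *	static const struct mtd_partition board_parts[] = {
 *		{
 *			.name	= "u-boot",
 *			.offset	= 0,
 *			.size	= 0x80000,
 *		},
 *		{
 *			.name	= "env",
 *			.offset	= MTDPART_OFS_APPEND,	// right after "u-boot"
 *			.size	= 0x20000,
 *		},
 *		{
 *			.name	= "rootfs",
 *			.offset	= MTDPART_OFS_NXTBLK,	// next erase block boundary
 *			.size	= MTDPART_SIZ_FULL,	// rest of the device
 *		},
 *	};
 *
 *	add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 *	...
 *	del_mtd_partitions(master);	// tear the slave devices down again
 */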