/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;

	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	res = part->master->read(part->master, from + part->offset,
				 len, retlen, buf);
	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
	}
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	return part->master->point(part->master, from + part->offset,
				   len, retlen, virt, phys);
}

static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	part->master->unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->get_unmapped_area(part->master, len, offset,
					       flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;
	res = part->master->read_oob(part->master, from + part->offset, ops);

	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}
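/*
 * Note: the protection-register (OTP) methods below pass the address
 * through unchanged, with no part->offset translation.  The protection
 * registers form a device-wide region separate from the partitioned
 * main array, so every partition sees the same OTP area.
 */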
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_fact_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->write(part->master, to + part->offset,
				   len, retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->panic_write(part->master, to + part->offset,
					 len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->write_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return part->master->writev(part->master, vecs, count,
				    to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (instr->addr >= mtd->size)
		return -EINVAL;
	instr->addr += part->offset;
	ret = part->master->erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}
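/*
 * Erase completes asynchronously; the master driver invokes this callback
 * when the operation finishes.  If the erase was issued through a partition
 * (i.e. the erase method of instr->mtd is part_erase), the addresses are
 * translated back to partition-relative values before the user's own
 * callback is run.
 */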
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	res = part->master->block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;

	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			list_del(&slave->list);
			del_mtd_device(&slave->mtd);
			kfree(slave);
		}

	return 0;
}
EXPORT_SYMBOL(del_mtd_partitions);
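/*
 * Allocate and set up one slave MTD object for a single partition entry:
 * clone the relevant properties from the master, resolve the special
 * MTDPART_OFS_APPEND / MTDPART_OFS_NXTBLK offsets and the MTDPART_SIZ_FULL
 * size, sanity-check the resulting range, and register the device.
 * Returns the new mtd_part, or NULL on allocation failure.
 */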
static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		uint64_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;

	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->get_unmapped_area)
		slave->mtd.get_unmapped_area = part_get_unmapped_area;
	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && !master->dev.class && master->suspend && master->resume) {
		slave->mtd.suspend = part_suspend;
		slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->is_locked)
		slave->mtd.is_locked = part_is_locked;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	if (master->block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (master->block_isbad(master,
						offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	/* register our partition */
	add_mtd_device(&slave->mtd);

	return slave;
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
EXPORT_SYMBOL(add_mtd_partitions);
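/*
 * Illustrative sketch only, not part of this file: a board or map driver
 * would typically describe its layout with a static table and hand it to
 * add_mtd_partitions() once the master MTD device has been probed.  The
 * partition names and sizes below are hypothetical.  Note that bits set in
 * mask_flags are cleared from the master's flags, so masking MTD_WRITEABLE
 * makes that partition read-only.
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{
 *			.name		= "bootloader",
 *			.offset		= 0,
 *			.size		= 0x40000,
 *			.mask_flags	= MTD_WRITEABLE,	(keep read-only)
 *		}, {
 *			.name		= "kernel",
 *			.offset		= MTDPART_OFS_APPEND,
 *			.size		= 0x400000,
 *		}, {
 *			.name		= "rootfs",
 *			.offset		= MTDPART_OFS_NXTBLK,
 *			.size		= MTDPART_SIZ_FULL,	(rest of the chip)
 *		},
 *	};
 *
 *	add_mtd_partitions(master_mtd, example_parts,
 *			   ARRAY_SIZE(example_parts));
 */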
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts, unsigned long origin)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser) {
			printk(KERN_NOTICE "%s partition parsing not available\n",
			       *types);
			continue;
		}
		ret = (*parser->parse_fn)(master, pparts, origin);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
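/*
 * Illustrative sketch only, not part of this file: a map driver that wants
 * to try a dynamically parsed partition description before falling back to
 * a built-in table might combine the two entry points roughly like this.
 * The probe list and fallback table are hypothetical; each named parser
 * must have been registered (for example by its own module) through
 * register_mtd_parser().
 *
 *	static const char *probe_types[] = { "cmdlinepart", NULL };
 *
 *	struct mtd_partition *parsed_parts;
 *	int nr = parse_mtd_partitions(master_mtd, probe_types,
 *				      &parsed_parts, 0);
 *	if (nr > 0)
 *		add_mtd_partitions(master_mtd, parsed_parts, nr);
 *	else
 *		add_mtd_partitions(master_mtd, fallback_parts,
 *				   ARRAY_SIZE(fallback_parts));
 */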