/*
 * Simple MTD partitioning layer
 *
 * (C) 2000 Nicolas Pitre <nico@cam.org>
 *
 * This code is GPL
 *
 * 02-21-2002	Thomas Gleixner <gleixner@autronix.de>
 *		added support for read_oob, write_oob
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	u_int32_t offset;
	int index;
	struct list_head list;
	int registered;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
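/*
 * Note: the cast in PART() is only valid because 'mtd' is the first member
 * of struct mtd_part. A layout-independent equivalent (shown here only as a
 * sketch, not what this file uses) would be the usual container_of() idiom:
 *
 *	#define PART(x)  container_of(x, struct mtd_part, mtd)
 */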

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	res = part->master->read(part->master, from + part->offset,
			len, retlen, buf);
	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	return part->master->point(part->master, from + part->offset,
				   len, retlen, virt, phys);
}

static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	part->master->unpoint(part->master, from + part->offset, len);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;
	res = part->master->read_oob(part->master, from + part->offset, ops);

	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_fact_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->write(part->master, to + part->offset,
				   len, retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->panic_write(part->master, to + part->offset,
					 len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->write_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return part->master->writev(part->master, vecs, count,
				    to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (instr->addr >= mtd->size)
		return -EINVAL;
	instr->addr += part->offset;
	ret = part->master->erase(part->master, instr);
	if (ret) {
		/* 0xffffffff means the failing address is unknown */
		if (instr->fail_addr != 0xffffffff)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != 0xffffffff)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->unlock(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	res = part->master->block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;

	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			list_del(&slave->list);
			if (slave->registered)
				del_mtd_device(&slave->mtd);
			kfree(slave);
		}

	return 0;
}
EXPORT_SYMBOL(del_mtd_partitions);
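/*
 * Example (hypothetical): the matching teardown for a driver that created
 * its slaves with add_mtd_partitions() below; del_mtd_partitions() drops
 * every slave bound to the given master. 'example_flash_remove' is an
 * illustrative name, not part of this file.
 */
#if 0
static void example_flash_remove(struct mtd_info *master)
{
	del_mtd_partitions(master);	/* unregisters and frees all slaves */
}
#endif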

static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		u_int32_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;

	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && master->suspend && master->resume) {
		slave->mtd.suspend = part_suspend;
		slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
	slave->index = partno;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if ((cur_offset % master->erasesize) != 0) {
			/* Round up to next erasesize */
			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%08x -> 0x%08x\n", partno,
			       cur_offset, slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
		slave->offset + slave->mtd.size, slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
			part->name, master->name, slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u32 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    (slave->offset % slave->mtd.erasesize)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    (slave->mtd.size % slave->mtd.erasesize)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	if (master->block_isbad) {
		uint32_t offs = 0;

		while (offs < slave->mtd.size) {
			if (master->block_isbad(master,
					offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	if (part->mtdp) {
		/* store the object pointer (caller may or may not register it) */
		*part->mtdp = &slave->mtd;
		slave->registered = 0;
	} else {
		/* register our partition */
		add_mtd_device(&slave->mtd);
		slave->registered = 1;
	}
	return slave;
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 * (Q: should we register the master MTD object as well?)
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	u_int32_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
EXPORT_SYMBOL(add_mtd_partitions);
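/*
 * Example (hypothetical): a board driver would typically describe its flash
 * layout with a static table like the one below and hand it to
 * add_mtd_partitions() once the master device has been probed. All names
 * and sizes are illustrative only; the MTDPART_* constants are the real
 * ones handled above.
 */
#if 0
static struct mtd_partition example_parts[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= 0x00040000,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,	/* right after "bootloader" */
		.size		= 0x00200000,
	}, {
		.name		= "rootfs",
		.offset		= MTDPART_OFS_NXTBLK,	/* round up to an erase block */
		.size		= MTDPART_SIZ_FULL,	/* all remaining space */
	},
};

static int example_flash_probe(struct mtd_info *master)
{
	return add_mtd_partitions(master, example_parts,
				  ARRAY_SIZE(example_parts));
}
#endif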

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts, unsigned long origin)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
#ifdef CONFIG_KMOD
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
#endif
		if (!parser) {
			printk(KERN_NOTICE "%s partition parsing not available\n",
			       *types);
			continue;
		}
		ret = (*parser->parse_fn)(master, pparts, origin);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
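/*
 * Example (hypothetical): how the parser interface above might be used.
 * A parser module fills in a struct mtd_part_parser and registers it
 * (normally from its module init; both steps are combined here for
 * brevity). A map driver then asks parse_mtd_partitions() to try a list of
 * parser names ("cmdlinepart" is a real in-tree parser) and falls back to
 * registering the bare master if none produced a table. All 'example_*'
 * identifiers are illustrative only.
 */
#if 0
static int example_parse_fn(struct mtd_info *master,
			    struct mtd_partition **pparts,
			    unsigned long origin)
{
	/* allocate and fill *pparts here, then return the partition count */
	return 0;	/* 0 or a negative value means "nothing found" */
}

static struct mtd_part_parser example_parser = {
	.owner		= THIS_MODULE,
	.parse_fn	= example_parse_fn,
	.name		= "example",
};

static const char *example_probe_types[] = { "cmdlinepart", "example", NULL };

static int example_map_probe(struct mtd_info *master)
{
	struct mtd_partition *parts;
	int nr_parts;

	register_mtd_parser(&example_parser);

	nr_parts = parse_mtd_partitions(master, example_probe_types,
					&parts, 0);
	if (nr_parts > 0)
		return add_mtd_partitions(master, parts, nr_parts);

	/* no parser produced a table: register the whole chip instead */
	return add_mtd_device(master);
}
#endif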