/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#define __UBOOT__
#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))


#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}

#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}
#endif

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
#ifndef __UBOOT__
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;
#endif

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
#endif
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			      "0x%012llx -> 0x%012llx\n", partno,
			      (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

#ifdef __UBOOT__
	/*
	 * Need to init the list here, since LIST_INIT() does not
	 * work on platforms where relocation has problems (like MIPS
	 * & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);
#endif

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
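
/*
 * Illustrative sketch (not compiled): a board or driver file might describe
 * a fixed layout and hand it to add_mtd_partitions(). The partition names,
 * sizes and the example_register_parts() helper below are assumptions made
 * purely for the sake of the example.
 */
#if 0
static const struct mtd_partition example_nand_parts[] = {
	{
		.name	= "u-boot",
		.offset	= 0,
		.size	= 0x100000,		/* 1 MiB */
	}, {
		.name	= "env",
		.offset	= MTDPART_OFS_APPEND,	/* directly after "u-boot" */
		.size	= 0x20000,		/* 128 KiB */
	}, {
		.name	= "ubi",
		.offset	= MTDPART_OFS_NXTBLK,	/* rounded up to an erase block */
		.size	= MTDPART_SIZ_FULL,	/* remainder of the device */
	},
};

static int example_register_parts(struct mtd_info *master)
{
	/* The master itself is not registered here, only its slaves */
	return add_mtd_partitions(master, example_nand_parts,
				  ARRAY_SIZE(example_nand_parts));
}
#endif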

#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there are more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
#endif

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
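
/*
 * Illustrative sketch (not compiled): the dynamic API above can also carve a
 * partition out of a master device at run time. The offset, size and the
 * example_scratch_partition() helper are assumptions made for the example
 * only.
 */
#if 0
static int example_scratch_partition(struct mtd_info *master)
{
	int ret;

	/* Carve out a 1 MiB "scratch" partition at offset 2 MiB */
	ret = mtd_add_partition(master, "scratch", 0x200000, 0x100000);
	if (ret)
		return ret;

	/*
	 * To remove it later, pass the index that was assigned to the slave
	 * MTD device: mtd_del_partition(master, partno);
	 */
	return 0;
}
#endif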