// SPDX-License-Identifier: GPL-2.0+
/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/sizes.h>

#include "mtdcore.h"

#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif

#define MTD_SIZE_REMAINING		(~0LLU)
#define MTD_OFFSET_NOT_SPECIFIED	(~0LLU)

bool mtd_partitions_used(struct mtd_info *master)
{
	struct mtd_info *slave;

	list_for_each_entry(slave, &master->partitions, node) {
		if (slave->usecount)
			return true;
	}

	return false;
}

/**
 * mtd_parse_partition - Parse @mtdparts partition definition, fill @partition
 *                       with it and update the @mtdparts string pointer.
 *
 * The partition name is allocated and must be freed by the caller.
 *
 * This function is largely inspired by part_parse() (mtdparts.c).
 *
 * @mtdparts: String describing the partition with mtdparts command syntax
 * @partition: MTD partition structure to fill
 *
 * @return 0 on success, an error otherwise.
 */
static int mtd_parse_partition(const char **_mtdparts,
			       struct mtd_partition *partition)
{
	const char *mtdparts = *_mtdparts;
	const char *name = NULL;
	int name_len;
	char *buf;

	/* Ensure the partition structure is empty */
	memset(partition, 0, sizeof(struct mtd_partition));

	/* Fetch the partition size */
	if (*mtdparts == '-') {
		/* Assign all remaining space to this partition */
		partition->size = MTD_SIZE_REMAINING;
		mtdparts++;
	} else {
		partition->size = ustrtoull(mtdparts, (char **)&mtdparts, 0);
		if (partition->size < SZ_4K) {
			printf("Minimum partition size 4kiB, %lldB requested\n",
			       partition->size);
			return -EINVAL;
		}
	}

	/* Check for the offset */
	partition->offset = MTD_OFFSET_NOT_SPECIFIED;
	if (*mtdparts == '@') {
		mtdparts++;
		partition->offset = ustrtoull(mtdparts, (char **)&mtdparts, 0);
	}

	/* Now look for the name */
	if (*mtdparts == '(') {
		name = ++mtdparts;
		mtdparts = strchr(name, ')');
		if (!mtdparts) {
			printf("No closing ')' found in partition name\n");
			return -EINVAL;
		}
		name_len = mtdparts - name + 1;
		if ((name_len - 1) == 0) {
			printf("Empty partition name\n");
			return -EINVAL;
		}
		mtdparts++;
	} else {
		/* Name will be of the form size@offset */
		name_len = 22;
	}

	/* Check if the partition is read-only */
	if (strncmp(mtdparts, "ro", 2) == 0) {
		partition->mask_flags |= MTD_WRITEABLE;
		mtdparts += 2;
	}

	/* Check for a potential next partition definition */
	if (*mtdparts == ',') {
		if (partition->size == MTD_SIZE_REMAINING) {
			printf("No partitions allowed after a fill-up\n");
			return -EINVAL;
		}
		++mtdparts;
	} else if ((*mtdparts == ';') || (*mtdparts == '\0')) {
		/* NOP */
	} else {
		printf("Unexpected character '%c' in mtdparts\n", *mtdparts);
		return -EINVAL;
	}

	/*
	 * Allocate a buffer for the name and either copy the provided name or
	 * auto-generate it with the form 'size@offset'.
	 */
	buf = malloc(name_len);
	if (!buf)
		return -ENOMEM;

	if (name)
		strncpy(buf, name, name_len - 1);
	else
		snprintf(buf, name_len, "0x%08llx@0x%08llx",
			 partition->size, partition->offset);

	buf[name_len - 1] = '\0';
	partition->name = buf;

	*_mtdparts = mtdparts;

	return 0;
}
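
/*
 * Illustrative example (not used by this file): parsing the single
 * definition "512k(u-boot)ro" with mtd_parse_partition() fills the structure
 * roughly as follows, assuming the usual U-Boot size suffixes handled by
 * ustrtoull():
 *
 *	struct mtd_partition part;
 *	const char *str = "512k(u-boot)ro";
 *
 *	mtd_parse_partition(&str, &part);
 *	// part.size       == 0x80000
 *	// part.offset     == MTD_OFFSET_NOT_SPECIFIED (no '@offset' given)
 *	// part.name       == "u-boot" (heap-allocated, caller frees it)
 *	// part.mask_flags == MTD_WRITEABLE (i.e. the partition is read-only)
 *	// str now points at the terminating '\0'
 */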

/**
 * mtd_parse_partitions - Create a partition array from an mtdparts definition
 *
 * Stateless function that takes a @parent MTD device, a string @_mtdparts
 * describing the partitions (with the "mtdparts" command syntax) and creates
 * the corresponding MTD partition structure array @_parts. Both the names and
 * the partition structures themselves must be freed; the caller may use
 * @mtd_free_parsed_partitions() for this purpose.
 *
 * @parent: MTD device which contains the partitions
 * @_mtdparts: Pointer to a string describing the partitions with "mtdparts"
 *             command syntax.
 * @_parts: Allocated array containing the partitions, must be freed by the
 *          caller.
 * @_nparts: Size of @_parts array.
 *
 * @return 0 on success, an error otherwise.
 */
int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
			 struct mtd_partition **_parts, int *_nparts)
{
	struct mtd_partition partition = {}, *parts;
	const char *mtdparts = *_mtdparts;
	int cur_off = 0, cur_sz = 0;
	int nparts = 0;
	int ret, idx;
	u64 sz;

	/* First, iterate over the partitions until we know their number */
	while (mtdparts[0] != '\0' && mtdparts[0] != ';') {
		ret = mtd_parse_partition(&mtdparts, &partition);
		if (ret)
			return ret;

		free((char *)partition.name);
		nparts++;
	}

	/* Allocate an array of partitions to give back to the caller */
	parts = malloc(sizeof(*parts) * nparts);
	if (!parts) {
		printf("Not enough space to save partitions meta-data\n");
		return -ENOMEM;
	}

	/* Iterate again over each partition to save the data in our array */
	for (idx = 0; idx < nparts; idx++) {
		ret = mtd_parse_partition(_mtdparts, &parts[idx]);
		if (ret)
			return ret;

		if (parts[idx].size == MTD_SIZE_REMAINING)
			parts[idx].size = parent->size - cur_sz;
		cur_sz += parts[idx].size;

		sz = parts[idx].size;
		if (sz < parent->writesize || do_div(sz, parent->writesize)) {
			printf("Partition size must be a multiple of %d\n",
			       parent->writesize);
			return -EINVAL;
		}

		if (parts[idx].offset == MTD_OFFSET_NOT_SPECIFIED)
			parts[idx].offset = cur_off;
		cur_off += parts[idx].size;

		parts[idx].ecclayout = parent->ecclayout;
	}

	/* Advance mtdparts past the ';' separator to the next device, if any */
	if (*_mtdparts[0] == ';')
		(*_mtdparts)++;

	*_parts = parts;
	*_nparts = nparts;

	return 0;
}

/**
 * mtd_free_parsed_partitions - Free dynamically allocated partitions
 *
 * Each successful call to @mtd_parse_partitions must be followed by a call to
 * @mtd_free_parsed_partitions to free any array allocated during the parsing
 * process.
 *
 * @parts: Array containing the partitions that will be freed.
 * @nparts: Size of @parts array.
 */
void mtd_free_parsed_partitions(struct mtd_partition *parts,
				unsigned int nparts)
{
	int i;

	for (i = 0; i < nparts; i++)
		free((char *)parts[i].name);

	free(parts);
}
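
/*
 * Typical usage sketch (illustrative, error handling trimmed): turning an
 * "mtdparts"-style string into registered partitions and then releasing the
 * temporary array, as the kerneldoc above requires:
 *
 *	struct mtd_partition *parts;
 *	const char *mtdparts = "1m(spl),-(firmware)";
 *	int nparts;
 *
 *	if (!mtd_parse_partitions(parent, &mtdparts, &parts, &nparts)) {
 *		add_mtd_partitions(parent, parts, nparts);
 *		mtd_free_parsed_partitions(parts, nparts);
 *	}
 *
 * Freeing right after add_mtd_partitions() is safe because
 * allocate_partition() duplicates the name with kstrdup().
 */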

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char *buf)
{
	struct mtd_ecc_stats stats;
	int res;

	stats = mtd->parent->ecc_stats;
	res = mtd->parent->_read(mtd->parent, from + mtd->offset, len,
				 retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			mtd->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			mtd->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, void **virt, resource_size_t *phys)
{
	return mtd->parent->_point(mtd->parent, from + mtd->offset, len,
				   retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	return mtd->parent->_unpoint(mtd->parent, from + mtd->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	offset += mtd->offset;
	return mtd->parent->_get_unmapped_area(mtd->parent, len, offset, flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = mtd->parent->_read_oob(mtd->parent, from + mtd->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}
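
/*
 * Note on the OOB bound check in part_read_oob() above, worked through on an
 * assumed geometry: for a partition of 64 pages with 64 bytes of OOB each
 * (MTD_OPS_PLACE_OOB), a request starting at page 60 leaves pages = 4, so at
 * most ops->ooboffs + ops->ooblen == 4 * 64 == 256 OOB bytes may be read;
 * anything larger fails with -EINVAL before the parent device is touched.
 */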

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_user_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_user_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_fact_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_fact_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		      size_t *retlen, const u_char *buf)
{
	return mtd->parent->_write(mtd->parent, to + mtd->offset, len,
				   retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf)
{
	return mtd->parent->_panic_write(mtd->parent, to + mtd->offset, len,
					 retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return mtd->parent->_write_oob(mtd->parent, to + mtd->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_write_user_prot_reg(mtd->parent, from, len,
						 retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len)
{
	return mtd->parent->_lock_user_prot_reg(mtd->parent, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		       unsigned long count, loff_t to, size_t *retlen)
{
	return mtd->parent->_writev(mtd->parent, vecs, count,
				    to + mtd->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int ret;

	instr->addr += mtd->offset;
	ret = mtd->parent->_erase(mtd->parent, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= mtd->offset;
		instr->addr -= mtd->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= instr->mtd->offset;
		instr->addr -= instr->mtd->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_lock(mtd->parent, ofs + mtd->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_unlock(mtd->parent, ofs + mtd->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_is_locked(mtd->parent, ofs + mtd->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	mtd->parent->_sync(mtd->parent);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	return mtd->parent->_suspend(mtd->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	mtd->parent->_resume(mtd->parent);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isreserved(mtd->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isbad(mtd->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int res;

	ofs += mtd->offset;
	res = mtd->parent->_block_markbad(mtd->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_info *p)
{
	kfree(p->name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object, recursively.
 */
static int do_del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_info *slave, *next;
	int ret, err = 0;

	list_for_each_entry_safe(slave, next, &master->partitions, node) {
		if (mtd_has_partitions(slave))
			del_mtd_partitions(slave);

		debug("Deleting %s MTD partition\n", slave->name);
		ret = del_mtd_device(slave);
		if (ret < 0) {
			printf("Error when deleting partition \"%s\" (%d)\n",
			       slave->name, ret);
			err = ret;
			continue;
		}

		list_del(&slave->node);
		free_partition(slave);
	}

	return err;
}

int del_mtd_partitions(struct mtd_info *master)
{
	int ret;

	debug("Deleting MTD partitions on \"%s\":\n", master->name);

	mutex_lock(&mtd_partitions_mutex);
	ret = do_del_mtd_partitions(master);
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
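
/*
 * Sketch of the expected teardown sequence (illustrative): callers that want
 * to re-partition a device first make sure no current partition is still in
 * use, e.g.:
 *
 *	if (mtd_has_partitions(master) && !mtd_partitions_used(master))
 *		del_mtd_partitions(master);
 */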

static struct mtd_info *allocate_partition(struct mtd_info *master,
					   const struct mtd_partition *part,
					   int partno, uint64_t cur_offset)
{
	struct mtd_info *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->type = master->type;
	slave->flags = master->flags & ~part->mask_flags;
	slave->size = part->size;
	slave->writesize = master->writesize;
	slave->writebufsize = master->writebufsize;
	slave->oobsize = master->oobsize;
	slave->oobavail = master->oobavail;
	slave->subpage_sft = master->subpage_sft;

	slave->name = name;
	slave->owner = master->owner;
#ifndef __UBOOT__
	slave->backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->dev.parent = master->dev.parent;
#endif

	if (master->_read)
		slave->_read = part_read;
	if (master->_write)
		slave->_write = part_write;

	if (master->_panic_write)
		slave->_panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->_point = part_point;
		slave->_unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->_get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->_read_oob = part_read_oob;
	if (master->_write_oob)
		slave->_write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->_read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->_read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->_write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->_lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->_get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->_get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->_sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->_suspend = part_suspend;
		slave->_resume = part_resume;
	}
	if (master->_writev)
		slave->_writev = part_writev;
#endif
	if (master->_lock)
		slave->_lock = part_lock;
	if (master->_unlock)
		slave->_unlock = part_unlock;
	if (master->_is_locked)
		slave->_is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->_block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->_block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->_block_markbad = part_block_markbad;
	slave->_erase = part_erase;
	slave->parent = master;
	slave->offset = part->offset;
	INIT_LIST_HEAD(&slave->partitions);
	INIT_LIST_HEAD(&slave->node);

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			      "0x%012llx -> 0x%012llx\n", partno,
			      (unsigned long long)cur_offset,
			      (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->size) {
			slave->size = master->size - slave->offset
						- slave->size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
			      part->name, master->size - slave->offset,
			      slave->size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->size == MTDPART_SIZ_FULL)
		slave->size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n",
	      (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->size), slave->name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
		       part->name);
		goto out_register;
	}
	if (slave->offset + slave->size > master->size) {
		slave->size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
		       part->name, master->name, slave->size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->erasesize < regions[i].erasesize)
				slave->erasesize = regions[i].erasesize;
		}
		WARN_ON(slave->erasesize == 0);
	} else {
		/* Single erase size */
		slave->erasesize = master->erasesize;
	}

	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, slave)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
		       part->name);
	}
	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->size, slave)) {
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
		       part->name);
	}

	slave->ecclayout = master->ecclayout;
	slave->ecc_step_size = master->ecc_step_size;
	slave->ecc_strength = master->ecc_strength;
	slave->bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->ecc_stats.badblocks++;
			offs += slave->erasesize;
		}
	}

out_register:
	return slave;
}

#ifndef __UBOOT__
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_info *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &master->partitions, node) {
		if (start >= p->offset &&
		    (start < (p->offset + p->size)))
			goto err_inv;

		if (end >= p->offset &&
		    (end < (p->offset + p->size)))
			goto err_inv;
	}

	list_add_tail(&new->node, &master->partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(new);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
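
/*
 * Example (illustrative): carving a 1 MiB partition named "env" out of
 * @master at offset 2 MiB:
 *
 *	ret = mtd_add_partition(master, "env", SZ_2M, SZ_1M);
 *
 * The offset must be absolute: MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK are
 * rejected, while a length of MTDPART_SIZ_FULL extends the partition to the
 * end of the master device.
 */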

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_info *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &master->partitions, node)
		if (slave->index == partno) {
			ret = del_mtd_device(slave);
			if (ret < 0)
				break;

			list_del(&slave->node);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
#endif

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_info *slave;
	uint64_t cur_offset = 0;
	int i;

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add_tail(&slave->node, &master->partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(slave);

		cur_offset = slave->offset + slave->size;
	}

	return 0;
}

#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
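
/*
 * Registration sketch for a hypothetical parser called "myparser" (names and
 * parse logic are placeholders; only the structure layout and parse_fn
 * signature are taken from the code in this file):
 *
 *	static int myparser_parse_fn(struct mtd_info *master,
 *				     struct mtd_partition **pparts,
 *				     struct mtd_part_parser_data *data)
 *	{
 *		// fill *pparts, return the number of partitions found
 *		return 0;
 *	}
 *
 *	static struct mtd_part_parser myparser = {
 *		.owner = THIS_MODULE,
 *		.parse_fn = myparser_parse_fn,
 *		.name = "myparser",
 *	};
 *
 *	register_mtd_parser(&myparser);
 */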

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
#endif

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (mtd_is_partition(mtd))
		return mtd->parent->size;

	return mtd->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);