/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/kconfig.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

static struct backing_dev_info mtd_bdi = {
};

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);

}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);

}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);

}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);

}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);

}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);

}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);

}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);

}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);

}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
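
/*
 * The attributes above surface under /sys/class/mtd/mtdX/ as one file per
 * attribute (e.g. /sys/class/mtd/mtd0/type or .../erasesize); all are
 * read-only except bitflip_threshold, which mtd_bitflip_threshold_store()
 * lets userspace tune at runtime.
 */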
ATTRIBUTE_GROUPS(mtd);

static struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->backing_dev_info, "MTD already registered\n"))
		return -EEXIST;

	mtd->backing_dev_info = &mtd_bdi;

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_added:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure: -ENODEV
 * if the requested device does not appear to be present in the list,
 * or -EBUSY if it is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

static int mtd_add_device_partitions(struct mtd_info *mtd,
				     struct mtd_partition *real_parts,
				     int nbparts)
{
	int ret;

	if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	if (nbparts > 0) {
		ret = add_mtd_partitions(mtd, real_parts, nbparts);
		if (ret && IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
			del_mtd_device(mtd);
		return ret;
	}

	return 0;
}


/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions. If the MTD_PARTITIONED_MASTER option is set, then the device
 *   as a whole is registered first.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;
	struct mtd_partition *real_parts = NULL;

	ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (ret <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			ret = -ENOMEM;
		else
			ret = nr_parts;
	}
	/* Didn't come up with either parsed OR fallback partitions */
	if (ret < 0) {
		pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
			ret);
		/* Don't abort on errors; we can still use unpartitioned MTD */
		ret = 0;
	}

	ret = mtd_add_device_partitions(mtd, real_parts, ret);
	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	kfree(real_parts);
	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
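
/*
 * Illustrative sketch (not part of this file; names are hypothetical): a
 * driver with a static fallback partition table might register its master
 * device like this, letting the default parsers (e.g. cmdlinepart) take
 * precedence over the fallback:
 *
 *	static const struct mtd_partition foo_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_512K },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	err = mtd_device_parse_register(mtd, NULL, NULL, foo_parts,
 *					ARRAY_SIZE(foo_parts));
 *	if (err)
 *		dev_err(dev, "MTD registration failed: %d\n", err);
 */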
/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
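
/*
 * Illustrative sketch (hypothetical user, not part of this file): an MTD
 * 'user' such as a block translation layer fills in a struct mtd_notifier
 * and gets its add() callback replayed for every device already present:
 *
 *	static void foo_add_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("foo: mtd%d (%s) arrived\n", mtd->index, mtd->name);
 *	}
 *
 *	static void foo_remove_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("foo: mtd%d departed\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier foo_notifier = {
 *		.add	= foo_add_mtd,
 *		.remove	= foo_remove_mtd,
 *	};
 *
 *	register_mtd_user(&foo_notifier);
 */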
/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any.  Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present.  Given
 * both, return the num'th driver only if its address matches.  Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
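
/*
 * Illustrative sketch (not part of this file; the device name is
 * hypothetical): callers bracket use of a device with a get/put pair and
 * check the handle with IS_ERR(), since these return ERR_PTR() values:
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("spi0.0");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	// ...use mtd_read()/mtd_write() on the device here...
 *	put_mtd_device(mtd);
 */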
void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);

}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is an asynchronous operation.  Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
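
/*
 * Minimal sketch (not part of this file) of the synchronous-erase pattern
 * used by in-kernel callers of this era: park on a wait queue that the
 * completion callback wakes, then check instr->state:
 *
 *	static void foo_erase_callback(struct erase_info *instr)
 *	{
 *		wake_up((wait_queue_head_t *)instr->priv);
 *	}
 *
 *	...
 *	struct erase_info ei = {};
 *	wait_queue_head_t waitq;
 *
 *	init_waitqueue_head(&waitq);
 *	ei.mtd = mtd;
 *	ei.addr = ofs;
 *	ei.len = mtd->erasesize;
 *	ei.callback = foo_erase_callback;
 *	ei.priv = (unsigned long)&waitq;
 *
 *	ret = mtd_erase(mtd, &ei);
 *	if (!ret)
 *		wait_event(waitq, ei.state == MTD_ERASE_DONE ||
 *				  ei.state == MTD_ERASE_FAILED);
 */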
/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -EOPNOTSUPP to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset >= mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;
	*retlen = 0;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
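
/*
 * Illustrative sketch (not part of this file): -EUCLEAN is a soft error,
 * so callers typically treat it as success for the data just read while
 * flagging the eraseblock for scrubbing or garbage collection:
 *
 *	err = mtd_read(mtd, from, len, &retlen, buf);
 *	if (err == -EUCLEAN) {
 *		// data is valid, but the block is degrading; scrub it
 *		schedule_block_scrub(mtd, from);
 *		err = 0;
 *	}
 *
 * (schedule_block_scrub() is a hypothetical caller-side helper.)
 */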
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
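
/*
 * Illustrative sketch (not part of this file; databuf/oobbuf/page_addr are
 * hypothetical caller variables): reading one page of data together with
 * its free OOB bytes through struct mtd_oob_ops:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobavail,
 *		.datbuf	= databuf,
 *		.oobbuf	= oobbuf,
 *	};
 *
 *	err = mtd_read_oob(mtd, page_addr, &ops);
 *	// ops.retlen / ops.oobretlen report how much was transferred
 */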
/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
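
/*
 * Illustrative sketch (not part of this file): flash-aware callers walk a
 * device eraseblock by eraseblock and skip blocks reported bad:
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		ret = mtd_block_isbad(mtd, ofs);
 *		if (ret < 0)
 *			return ret;	// I/O or range error
 *		if (ret > 0)
 *			continue;	// skip known-bad eraseblock
 *		// ...use this eraseblock...
 *	}
 */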
/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
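
/*
 * Illustrative sketch (not part of this file): a caller asks for an ideal
 * buffer size and accepts whatever smaller, writesize-aligned size it gets:
 *
 *	size_t size = mtd->erasesize;	// ideal: one full eraseblock
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// 'size' now holds the actual allocation; work in 'size' chunks
 *	...
 *	kfree(buf);
 */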
#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi, "mtd");
	if (ret)
		goto err_bdi;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");