/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/kconfig.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

static struct backing_dev_info mtd_bdi = {
};

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
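
/*
 * Worked example (illustrative only): MTD_CHAR_MAJOR is 90, so index 0
 * maps to dev_t (90, 0) for /dev/mtd0 and, one minor up, (90, 1) for the
 * read-only node /dev/mtd0ro; index 1 maps to (90, 2) and (90, 3), and
 * so on -- each MTD index consumes two char-device minors.
 */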

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);
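
/*
 * Illustrative note: these attributes appear under /sys/class/mtd/mtdX/.
 * bitflip_threshold is the only writable one; it controls when mtd_read()
 * reports -EUCLEAN.  For example, on a device with ecc_strength == 8,
 * lowering the threshold to 6 makes any read that needed six or more
 * corrected bitflips in one ECC region return -EUCLEAN, which consumers
 * such as UBI treat as a "scrub this block" hint.
 */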

static struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or a negative error code on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->backing_dev_info, "MTD already registered\n"))
		return -EEXIST;

	mtd->backing_dev_info = &mtd_bdi;

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
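
	/*
	 * Worked example (illustrative): a chip with a 128 KiB erasesize
	 * (0x20000) gets erasesize_shift = 17 and erasesize_mask = 0x1ffff,
	 * so "ofs & mtd->erasesize_mask" cheaply yields the offset within
	 * an eraseblock and "ofs >> mtd->erasesize_shift" the block number.
	 * Non-power-of-2 geometries fall back to shift/mask of 0 and
	 * callers must use division instead.
	 */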

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure, which
 * currently will happen if the requested device does not appear to be
 * present in the list, or if it is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

static int mtd_add_device_partitions(struct mtd_info *mtd,
				     struct mtd_partitions *parts)
{
	const struct mtd_partition *real_parts = parts->parts;
	int nbparts = parts->nr_parts;
	int ret;

	if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	if (nbparts > 0) {
		ret = add_mtd_partitions(mtd, real_parts, nbparts);
		if (ret && IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
			del_mtd_device(mtd);
		return ret;
	}

	return 0;
}
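
/*
 * Illustrative example (hypothetical names): a fallback partition table a
 * driver might hand down to the registration path above:
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot",   .offset = 0,       .size = SZ_512K },
 *		{ .name = "rootfs", .offset = SZ_512K, .size = MTDPART_SIZ_FULL },
 *	};
 *
 * With CONFIG_MTD_PARTITIONED_MASTER=y, mtd_add_device_partitions()
 * registers three devices for this table (the master plus both
 * partitions); with the option unset, only the two partition devices
 * are registered.
 */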

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions. If the MTD_PARTITIONED_MASTER option is set, then the device
 *   as a whole is registered first.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	struct mtd_partitions parsed;
	int ret;

	mtd_set_dev_defaults(mtd);

	memset(&parsed, 0, sizeof(parsed));

	ret = parse_mtd_partitions(mtd, types, &parsed, parser_data);
	if ((ret < 0 || parsed.nr_parts == 0) && parts && nr_parts) {
		/* Fall back to driver-provided partitions */
		parsed = (struct mtd_partitions){
			.parts = parts,
			.nr_parts = nr_parts,
		};
	} else if (ret < 0) {
		/* Didn't come up with parsed OR fallback partitions */
		pr_info("mtd: failed to find partitions; one or more parsers reported errors (%d)\n",
			ret);
		/* Don't abort on errors; we can still use unpartitioned MTD */
		memset(&parsed, 0, sizeof(parsed));
	}

	ret = mtd_add_device_partitions(mtd, &parsed);
	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	/* Cleanup any parsed partitions */
	mtd_part_parser_cleanup(&parsed);
	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
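
/*
 * Usage sketch (illustrative; 'example_probe' and the driver-private
 * setup are hypothetical, not part of this file): a typical flash driver
 * fills in its mtd_info, points dev.parent at the physical device, and
 * lets the core handle partition parsing and registration:
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = ...;	... driver-private setup ...
 *
 *		mtd->dev.parent = &pdev->dev;
 *		return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
 *	}
 *
 * The matching remove path calls mtd_device_unregister() (below).
 */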

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister. This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
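
/*
 * Usage sketch (illustrative; 'example_add'/'example_remove' are
 * hypothetical): an MTD 'user', such as a block translation layer, hooks
 * device arrival and departure like this:
 *
 *	static void example_add(struct mtd_info *mtd)
 *	{
 *		... bind to the newly arrived device ...
 *	}
 *
 *	static void example_remove(struct mtd_info *mtd)
 *	{
 *		... release any state tied to the departing device ...
 *	}
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add = example_add,
 *		.remove = example_remove,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 *
 * register_mtd_user() immediately replays 'add' for every device already
 * present, so the user need not scan the table itself.
 */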

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th device only if its address matches. Return an
 * error pointer if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error pointer in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is an asynchronous operation. Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
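
/*
 * Usage sketch (illustrative): because erase completes asynchronously,
 * in-kernel callers classically block on a waitqueue that the erase
 * callback wakes; this is a condensed, assumption-level rendering of the
 * pattern used by e.g. mtdblock:
 *
 *	static void erase_callback(struct erase_info *done)
 *	{
 *		wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
 *		wake_up(wait_q);
 *	}
 *
 *	struct erase_info erase;
 *	DECLARE_WAITQUEUE(wait, current);
 *	wait_queue_head_t wait_q;
 *	int ret;
 *
 *	init_waitqueue_head(&wait_q);
 *	erase.mtd = mtd;
 *	erase.addr = ofs;
 *	erase.len = mtd->erasesize;
 *	erase.callback = erase_callback;
 *	erase.priv = (u_long)&wait_q;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	add_wait_queue(&wait_q, &wait);
 *	ret = mtd_erase(mtd, &erase);
 *	if (!ret)
 *		schedule();		... woken by erase_callback() ...
 *	else
 *		set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wait_q, &wait);
 */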

/*
 * This stuff is for eXecute-In-Place (XIP). 'phys' is optional and may be
 * set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -EOPNOTSUPP to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset >= mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;
	*retlen = 0;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
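
/*
 * Usage sketch (illustrative): distinguishing correctable bitflips from
 * hard errors when calling mtd_read():
 *
 *	err = mtd_read(mtd, from, len, &retlen, buf);
 *	if (mtd_is_bitflip(err)) {
 *		... -EUCLEAN: data in 'buf' is valid, but the block is
 *		    degrading and should be scrubbed (copied, re-erased) ...
 *	} else if (err) {
 *		... hard failure, e.g. mtd_is_eccerr(err) for an
 *		    uncorrectable ECC error; 'buf' is not trustworthy ...
 *	}
 */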

/*
 * In blackbox flight-recorder-like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it's known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer,
 * this function can break locks and delay to ensure the write succeeds
 * (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
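
/*
 * Usage sketch (illustrative): reading only the free OOB bytes of one
 * page, letting the driver place them according to its ECC layout:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.ooblen = mtd->oobavail,
 *		.oobbuf = oobbuf,
 *		.datbuf = NULL,		... OOB only, no main-area data ...
 *	};
 *	err = mtd_read_oob(mtd, page_addr, &ops);
 *	... on success, ops.oobretlen holds the number of OOB bytes read ...
 */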

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read-only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of OTP space and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
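
/*
 * Usage sketch (illustrative): callers walking a NAND device eraseblock
 * by eraseblock typically skip bad blocks like this:
 *
 *	loff_t ofs;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		int ret = mtd_block_isbad(mtd, ofs);
 *
 *		if (ret < 0)
 *			return ret;	... error while checking ...
 *		if (ret)
 *			continue;	... bad block, skip it ...
 *		... use the eraseblock at 'ofs' ...
 *	}
 */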

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
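
/*
 * Usage sketch (illustrative): request an eraseblock-sized buffer but
 * accept less under memory pressure:
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... 'size' now holds the actual, writesize-aligned allocation, so
 *	    process the region in 'size'-byte chunks ...
 */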

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi, "mtd");
	if (ret)
		goto err_bdi;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_destroy(&mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");