// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	idr_remove(&mtd_idr, mtd->index);
	of_node_put(mtd_get_of_node(mtd));

	if (mtd_is_partition(mtd))
		release_mtd_partition(mtd);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static void mtd_device_release(struct kref *kref)
{
	struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
	bool is_partition = mtd_is_partition(mtd);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* Try to remove the NVMEM provider */
	nvmem_unregister(mtd->nvmem);

	device_unregister(&mtd->dev);

	/*
	 * Clear dev so mtd can be safely re-registered later if desired.
	 * This must not be done for partitions, as the device was already
	 * destroyed in device_unregister().
	 */
	if (!is_partition)
		memset(&mtd->dev, 0, sizeof(mtd->dev));

	module_put(THIS_MODULE);
}

#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);
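
/*
 * These attributes show up under /sys/class/mtd/mtdX/, e.g. (illustrative
 * shell session, values depend on the device):
 *
 *	$ cat /sys/class/mtd/mtd0/type
 *	nor
 *	$ cat /sys/class/mtd/mtd0/erasesize
 *	65536
 */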

static ssize_t mtd_ecc_strength_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
	const char *mtd_expert_analysis_warning =
		"Bad block checks have been entirely disabled.\n"
		"This is only reserved for post-mortem forensics and debug purposes.\n"
		"Never enable this mode if you do not know what you are doing!\n";

	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated with the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated with the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
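
/*
 * Worked example (illustrative figures, not taken from a real chip): an
 * MLC NAND exposing 2 bits per cell has mtd_pairing_groups() == 2. With
 * 256 wunits per erase block that makes 256 / 2 = 128 pairs, so valid
 * pairing info spans group 0..1 and pair 0..127, and SLC emulation only
 * programs the 128 group-0 pages of each block.
 */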

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = NVMEM_DEVID_NONE;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
			mtd->nvmem = NULL;
		else
			return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
					     "Failed to register NVMEM device\n");
	}

	return 0;
}

static void mtd_check_of_node(struct mtd_info *mtd)
{
	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
	const char *pname, *prefix = "partition-";
	int plen, mtd_name_len, offset, prefix_len;

	/* Check if MTD already has a device node */
	if (mtd_get_of_node(mtd))
		return;

	if (!mtd_is_partition(mtd))
		return;

	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
	if (!parent_dn)
		return;

	if (mtd_is_partition(mtd->parent))
		partitions = of_node_get(parent_dn);
	else
		partitions = of_get_child_by_name(parent_dn, "partitions");
	if (!partitions)
		goto exit_parent;

	prefix_len = strlen(prefix);
	mtd_name_len = strlen(mtd->name);

	/* Search for a partition defined with the same name */
	for_each_child_of_node(partitions, mtd_dn) {
		/* Skip partitions with no/wrong prefix */
		if (!of_node_name_prefix(mtd_dn, prefix))
			continue;

		/* Labels have priority, so check for one first */
		if (!of_property_read_string(mtd_dn, "label", &pname)) {
			offset = 0;
		} else {
			pname = mtd_dn->name;
			offset = prefix_len;
		}

		plen = strlen(pname) - offset;
		if (plen == mtd_name_len &&
		    !strncmp(mtd->name, pname + offset, plen)) {
			mtd_set_of_node(mtd, mtd_dn);
			break;
		}
	}

	of_node_put(partitions);
exit_parent:
	of_node_put(parent_dn);
}
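
/*
 * Illustrative devicetree sketch (all names are made up): a child MTD
 * named "rootfs" is matched either by a node called "partition-rootfs"
 * or by any "partition-" prefixed node whose "label" property says
 * "rootfs", the label taking priority over the node name:
 *
 *	partitions {
 *		partition-rootfs {
 *		};
 *	};
 */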

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct device_node *np = mtd_get_of_node(mtd);
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error, ofidx;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	ofidx = -1;
	if (np)
		ofidx = of_alias_get_id(np, "mtd");
	if (ofidx >= 0)
		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
	else
		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	kref_init(&mtd->refcnt);

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	mtd_check_of_node(mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error) {
		put_device(&mtd->dev);
		goto fail_added;
	}

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
		if (IS_BUILTIN(CONFIG_MTD)) {
			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
		} else {
			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
				mtd->index, mtd->name);
		}
	}

	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or -ENODEV if the requested device does not
 * appear to be present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	kref_put(&mtd->refcnt, mtd_device_release);
	ret = 0;

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = compatible;
	config.id = NVMEM_DEVID_AUTO;
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct device *dev = mtd->dev.parent;
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			/*
			 * The factory OTP contains things such as a unique
			 * serial number and is small, so let's read it out
			 * and put it into the entropy pool.
			 */
			void *otp;

			otp = kmalloc(size, GFP_KERNEL);
			if (!otp) {
				err = -ENOMEM;
				goto err;
			}
			err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
			if (err < 0) {
				kfree(otp);
				goto err;
			}
			add_device_randomness(otp, err);
			kfree(otp);

			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	ret = mtd_otp_nvmem_add(mtd);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			goto out;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret) {
		nvmem_unregister(mtd->otp_user_nvmem);
		nvmem_unregister(mtd->otp_factory_nvmem);
	}

	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
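
/*
 * Typical use from a flash driver probe path (illustrative sketch; the
 * probe function and "my_parts" are made-up names):
 *
 *	static const struct mtd_partition my_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int my_flash_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = ...;	(set up by the driver)
 *
 *		mtd->dev.parent = &pdev->dev;
 *		return mtd_device_parse_register(mtd, NULL, NULL, my_parts,
 *						 ARRAY_SIZE(my_parts));
 *	}
 *
 * Passing a NULL @types selects the default parsers; @parts is only a
 * fallback used when no partitions are parsed.
 */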

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister. This will unregister both the master
 *	    and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
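
/*
 * Minimal 'user' sketch (my_add/my_remove/my_notifier are made-up names):
 * both callbacks run under mtd_table_mutex, and 'add' is replayed for all
 * devices already present at registration time.
 *
 *	static void my_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void my_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d is going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 */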

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return it if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (master->_get_device) {
		err = master->_get_device(mtd);
		if (err)
			return err;
	}

	if (!try_module_get(master->owner)) {
		if (master->_put_device)
			master->_put_device(master);
		return -ENODEV;
	}

	while (mtd) {
		if (mtd != master)
			kref_get(&mtd->refcnt);
		mtd = mtd->parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_get(&master->refcnt);

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
	struct mtd_info *mtd = NULL;
	struct mtd_info *tmp;
	int err;

	mutex_lock(&mtd_table_mutex);

	err = -EPROBE_DEFER;
	mtd_for_each_device(tmp) {
		if (mtd_get_of_node(tmp) == np) {
			mtd = tmp;
			err = __get_mtd_device(mtd);
			break;
		}
	}

	mutex_unlock(&mtd_table_mutex);

	return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 * @name: MTD device name to open
 *
 * This function returns the MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd) {
		/* kref_put() can release mtd, so keep a reference to mtd->parent */
		struct mtd_info *parent = mtd->parent;

		if (mtd != master)
			kref_put(&mtd->refcnt, mtd_device_release);
		mtd = parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_put(&master->refcnt, mtd_device_release);

	module_put(master->owner);

	/* must be the last as master can be freed in the _put_device */
	if (master->_put_device)
		master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
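
/*
 * Erasing a single block (illustrative sketch; "block" is a made-up
 * variable holding a block index):
 *
 *	struct erase_info ei = {
 *		.addr = (loff_t)block * mtd->erasesize,
 *		.len = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 *
 * On failure, ei.fail_addr (when not MTD_FAIL_ADDR_UNKNOWN) points at the
 * portion that was not properly erased.
 */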

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
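
/*
 * Plain data read (illustrative sketch): a short read is reported via
 * *retlen, and -EUCLEAN means data was corrected but the number of
 * bitflips reached mtd->bitflip_threshold, i.e. the block should be
 * scrubbed soon, not that the read failed.
 *
 *	size_t retlen;
 *	u_char buf[128];
 *	int err = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);
 *
 *	if (err && err != -EUCLEAN)
 *		return err;
 */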

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (ops->stats)
		memset(ops->stats, 0, sizeof(*ops->stats));

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	if (ops->stats)
		ops->stats->max_bitflips = ret_code;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
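
/*
 * Walking every free OOB region (illustrative sketch; the same pattern
 * applies to mtd_ooblayout_ecc()):
 *
 *	struct mtd_oob_region oobfree;
 *	int section = 0, ret;
 *
 *	while (!(ret = mtd_ooblayout_free(mtd, section++, &oobfree)))
 *		pr_info("free: offset %u, length %u\n",
 *			oobfree.offset, oobfree.length);
 *
 * The loop ends once the iterator returns -ERANGE.
 */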

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				     int *sectionp, struct mtd_oob_region *oobregion,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' byte.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				   const u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				   u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				   const u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				   u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}

/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
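 *
 * Example (illustrative sketch, assumes oobbuf holds at least mtd->oobsize
 * bytes): scatter 4 user bytes into the free OOB positions before an OOB
 * write:
 *
 *	u8 data[4] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *	memset(oobbuf, 0xff, mtd->oobsize);
 *	mtd_ooblayout_set_databytes(mtd, data, oobbuf, 0, sizeof(data));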
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	*retlen = 0;
	if (!master->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of space and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
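
/*
 * Example (illustrative sketch, error handling trimmed): program 16 bytes
 * into the user OTP area and read them back. 'payload' is a hypothetical
 * caller buffer; OTP writes are one-time, so real callers must be certain
 * of the contents before writing.
 *
 *	u8 check[16];
 *	size_t retlen;
 *
 *	mtd_write_user_prot_reg(mtd, 0, sizeof(payload), &retlen, payload);
 *	mtd_read_user_prot_reg(mtd, 0, sizeof(check), &retlen, check);
 */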

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_erase_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_erase_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
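
/*
 * Example (illustrative sketch, error handling trimmed): unlock one
 * eraseblock before writing to it. Devices without an _unlock() hook
 * return -EOPNOTSUPP, which callers typically treat as "nothing to do".
 *
 *	int err = mtd_unlock(mtd, ofs, mtd->erasesize);
 *
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 */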

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isreserved)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
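
/*
 * Example (illustrative sketch): count the bad eraseblocks of a device the
 * way a scanning tool would, one eraseblock at a time. mtd_block_isbad()
 * returns a positive value for a bad block, 0 for a good one and a negative
 * error code otherwise.
 *
 *	loff_t ofs;
 *	int bad = 0;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize)
 *		if (mtd_block_isbad(mtd, ofs) > 0)
 *			bad++;
 */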

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!master->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);

	return master->_writev(master, vecs, count,
			       mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *	  to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
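
/*
 * Example (illustrative sketch): request a 1 MiB scratch buffer but accept
 * whatever smaller, writesize-aligned allocation the allocator can satisfy;
 * on success, 'size' holds the size actually obtained.
 *
 *	size_t size = SZ_1M;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */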

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static struct backing_dev_info * __init mtd_bdi_init(const char *name)
{
	struct backing_dev_info *bdi;
	int ret;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return ERR_PTR(-ENOMEM);
	bdi->ra_pages = 0;
	bdi->io_pages = 0;

	/*
	 * We put a '-0' suffix on the name to get the same name format as we
	 * used to get. Since this is called only once, we get a unique name.
	 */
	ret = bdi_register(bdi, "%.28s-0", name);
	if (ret)
		bdi_put(bdi);

	return ret ? ERR_PTR(ret) : bdi;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
			    &mtd_expert_analysis_mode);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_unregister(mtd_bdi);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_unregister(mtd_bdi);
	bdi_put(mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");