// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
}
static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);

}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

static int mtd_partid_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);

static int mtd_partname_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partname);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;
	struct dentry *root;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
	mtd->dbg.dfs_dir = root;

	if (mtd->dbg.partid)
		debugfs_create_file("partid", 0400, root, mtd,
				    &mtd_partid_debug_fops);

	if (mtd->dbg.partname)
		debugfs_create_file("partname", 0400, root, mtd,
				    &mtd_partname_debug_fops);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = -1;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
			mtd->nvmem = NULL;
		} else {
			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
			return PTR_ERR(mtd->nvmem);
		}
	}

	return 0;
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure, which
 * currently will happen if the requested device does not appear to
 * be present in the list, or if it is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		/* Try to remove the NVMEM provider */
		if (mtd->nvmem)
			nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
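
/*
 * A minimal usage sketch for mtd_device_parse_register(). The parser list,
 * the fallback table and the names "my_probe_types"/"my_parts" are
 * hypothetical, given purely for illustration; a typical driver probe
 * path might do:
 *
 *	static const char * const my_probe_types[] = {
 *		"cmdlinepart", "ofpart", NULL
 *	};
 *
 *	static const struct mtd_partition my_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	ret = mtd_device_parse_register(mtd, my_probe_types, NULL,
 *					my_parts, ARRAY_SIZE(my_parts));
 *
 * On remove, the matching call is mtd_device_unregister() on the same
 * mtd_info.
 */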

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister. This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
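
/*
 * A sketch of a typical MTD 'user' built on the notifier API above;
 * "my_add", "my_remove" and "my_notifier" are hypothetical names:
 *
 *	static void my_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void my_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 *
 * register_mtd_user() replays ->add() for devices already present, and
 * unregister_mtd_user() replays ->remove() before delisting the notifier.
 */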

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (!try_module_get(master->owner))
		return -ENODEV;

	if (master->_get_device) {
		err = master->_get_device(mtd);

		if (err) {
			module_put(master->owner);
			return err;
		}
	}

	master->usecount++;

	while (mtd->parent) {
		mtd->usecount++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);

}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd->parent) {
		--mtd->usecount;
		BUG_ON(mtd->usecount < 0);
		mtd = mtd->parent;
	}

	master->usecount--;

	if (master->_put_device)
		master->_put_device(master);

	module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
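
/*
 * An illustrative lookup/release pairing for the handle API above
 * ("rootfs" is a hypothetical device name):
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("rootfs");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	... I/O through mtd_read()/mtd_write() ...
 *	put_mtd_device(mtd);
 *
 * get_mtd_device(NULL, 0) would similarly return the device with index 0,
 * taking the references that __put_mtd_device() later drops.
 */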

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
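
/*
 * A minimal erase sketch, assuming "ofs" is erase-block aligned and lies
 * within the device:
 *
 *	struct erase_info ei = {
 *		.addr = ofs,
 *		.len = mtd->erasesize,
 *	};
 *
 *	ret = mtd_erase(mtd, &ei);
 *	if (ret)
 *		pr_err("erase failed at 0x%llx\n",
 *		       (unsigned long long)ei.fail_addr);
 *
 * ei.fail_addr is only meaningful on failure, and may remain
 * MTD_FAIL_ADDR_UNKNOWN when the failing block cannot be identified.
 */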

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
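
/*
 * A hedged sketch of the usual read pattern built on the wrappers above;
 * "buf" is a kernel buffer of at least "len" bytes:
 *
 *	size_t retlen;
 *	int ret = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (ret == -EUCLEAN)
 *		... data is valid, but corrected bitflips reached
 *		    mtd->bitflip_threshold: consider moving the data ...
 *	else if (ret < 0)
 *		return ret;
 *
 * mtd_write(mtd, to, len, &retlen, buf) is symmetrical and additionally
 * fails with -EROFS when MTD_WRITEABLE is not set.
 */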

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
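
/*
 * A sketch of combined page+OOB access through the entry points above;
 * "databuf" and "oobbuf" are hypothetical preallocated buffers:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode = MTD_OPS_AUTO_OOB,
 *		.len = mtd->writesize,
 *		.datbuf = databuf,
 *		.ooblen = mtd->oobavail,
 *		.oobbuf = oobbuf,
 *	};
 *
 *	ret = mtd_read_oob(mtd, page_offs, &ops);
 *
 * Either buffer may be NULL to transfer only the other part, and
 * mtd_write_oob() takes the same descriptor.
 */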

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
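
/*
 * The iteration pattern described in the two kerneldocs above, spelled
 * out as a sketch that walks every ECC region until -ERANGE:
 *
 *	struct mtd_oob_region region;
 *	int section = 0, ret;
 *
 *	while (!(ret = mtd_ooblayout_ecc(mtd, section++, &region)))
 *		pr_info("ecc region: offset %u, length %u\n",
 *			region.offset, region.length);
 *	if (ret != -ERANGE)
 *		... a real error rather than the end of the layout ...
 *
 * mtd_ooblayout_free() enumerates the free regions the same way.
 */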

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				     int *sectionp, struct mtd_oob_region *oobregion,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' ECC byte.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				   const u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				   u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}

/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
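
/*
 * An illustrative call to the helper above, pulling the first 8 ECC
 * bytes out of a raw OOB dump ("raw_oob" is hypothetical, e.g. read
 * with an MTD_OPS_RAW access):
 *
 *	u8 ecc[8];
 *
 *	ret = mtd_ooblayout_get_eccbytes(mtd, ecc, raw_oob, 0, sizeof(ecc));
 *
 * The helper walks the ECC sections internally, so the requested bytes
 * need not be contiguous inside the OOB area.
 */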

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);

/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
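
/*
 * A hedged sketch of querying and reading the factory OTP area with the
 * helpers above; the array size and "buf" are illustrative only:
 *
 *	struct otp_info info[4];
 *	size_t retlen;
 *
 *	ret = mtd_get_fact_prot_info(mtd, sizeof(info), &retlen, info);
 *	if (!ret && retlen >= sizeof(info[0]))
 *		ret = mtd_read_fact_prot_reg(mtd, info[0].start,
 *					     info[0].length, &retlen, buf);
 *
 * -EOPNOTSUPP from either helper simply means the chip exposes no such
 * region.
 */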

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	*retlen = 0;
	if (!master->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_erase_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_erase_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isreserved)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
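
/*
 * A minimal bad-block scan built on the helpers above; a negative return
 * from mtd_block_isbad() would indicate an error rather than a bad block:
 *
 *	loff_t ofs;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, ofs) > 0)
 *			pr_info("bad block at 0x%llx\n",
 *				(unsigned long long)ofs);
 *	}
 *
 * mtd_block_markbad() is the matching way to record a newly discovered
 * bad block; it also bumps ecc_stats.badblocks up the partition chain.
 */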
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isreserved)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
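/*
 * Example (illustrative sketch, not used by the core): counting bad
 * eraseblocks on a device with mtd_block_isbad(). The function name is
 * hypothetical and only serves to demonstrate the API.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		int ret = mtd_block_isbad(mtd, ofs);

		if (ret < 0)
			return ret;	/* invalid offset or I/O error */
		if (ret)
			bad++;
	}

	return bad;
}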
/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!master->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);

	return master->_writev(master, vecs, count,
			       mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
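/*
 * Example (illustrative sketch, not used by the core): gathering a header
 * and a payload into one mtd_writev() call. When the driver provides no
 * ->_writev() handler, the write is transparently split into sequential
 * mtd_write() calls by default_mtd_writev() above. All names here are
 * hypothetical.
 */
static int __maybe_unused example_gather_write(struct mtd_info *mtd, loff_t to,
					       void *hdr, size_t hdr_len,
					       void *payload, size_t payload_len)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,     .iov_len = hdr_len     },
		{ .iov_base = payload, .iov_len = payload_len },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdr_len + payload_len)
		ret = -EIO;	/* short write */

	return ret;
}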
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last-resort allocation, allow 'kmalloc()' to do all sorts
	 * of things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static struct backing_dev_info * __init mtd_bdi_init(const char *name)
{
	struct backing_dev_info *bdi;
	int ret;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return ERR_PTR(-ENOMEM);
	bdi->ra_pages = 0;
	bdi->io_pages = 0;

	/*
	 * We append a '-0' suffix to the name to get the same name format as
	 * we used to get. Since this is called only once, we get a unique
	 * name.
	 */
	ret = bdi_register(bdi, "%.28s-0", name);
	if (ret)
		bdi_put(bdi);

	return ret ? ERR_PTR(ret) : bdi;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_put(mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");
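/*
 * Example (illustrative sketch, not used by the core): the usage pattern
 * that mtd_kmalloc_up_to() above is designed for. The caller asks for an
 * ideal buffer size and then works in whatever smaller chunk size the
 * allocator could actually provide. Names are hypothetical; a real caller
 * would also treat -EUCLEAN (corrected bitflips) from mtd_read() specially.
 */
static int __maybe_unused example_read_whole_device(struct mtd_info *mtd,
						    u_char *dst)
{
	size_t chunk = KMALLOC_MAX_SIZE;	/* ideal: as big as possible */
	loff_t ofs = 0;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &chunk);	/* may shrink 'chunk' */
	if (!buf)
		return -ENOMEM;

	while (!ret && ofs < mtd->size) {
		size_t len = min_t(size_t, chunk, mtd->size - ofs);
		size_t retlen;

		ret = mtd_read(mtd, ofs, len, &retlen, buf);
		if (!ret) {
			memcpy(dst + ofs, buf, retlen);
			ofs += retlen;
		}
	}

	kfree(buf);
	return ret;
}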