1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Overview: 4 * This is the generic MTD driver for NAND flash devices. It should be 5 * capable of working with almost all NAND chips currently available. 6 * 7 * Additional technical information is available on 8 * http://www.linux-mtd.infradead.org/doc/nand.html 9 * 10 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) 11 * 2002-2006 Thomas Gleixner (tglx@linutronix.de) 12 * 13 * Credits: 14 * David Woodhouse for adding multichip support 15 * 16 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the 17 * rework for 2K page size chips 18 * 19 * TODO: 20 * Enable cached programming for 2k page size chips 21 * Check, if mtd->ecctype should be set to MTD_ECC_HW 22 * if we have HW ECC support. 23 * BBT table is not serialized, has to be fixed 24 */ 25 26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 27 28 #include <linux/module.h> 29 #include <linux/delay.h> 30 #include <linux/errno.h> 31 #include <linux/err.h> 32 #include <linux/sched.h> 33 #include <linux/slab.h> 34 #include <linux/mm.h> 35 #include <linux/types.h> 36 #include <linux/mtd/mtd.h> 37 #include <linux/mtd/nand.h> 38 #include <linux/mtd/nand-ecc-sw-hamming.h> 39 #include <linux/mtd/nand-ecc-sw-bch.h> 40 #include <linux/interrupt.h> 41 #include <linux/bitops.h> 42 #include <linux/io.h> 43 #include <linux/mtd/partitions.h> 44 #include <linux/of.h> 45 #include <linux/of_gpio.h> 46 #include <linux/gpio/consumer.h> 47 48 #include "internals.h" 49 50 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page, 51 struct mtd_pairing_info *info) 52 { 53 int lastpage = (mtd->erasesize / mtd->writesize) - 1; 54 int dist = 3; 55 56 if (page == lastpage) 57 dist = 2; 58 59 if (!page || (page & 1)) { 60 info->group = 0; 61 info->pair = (page + 1) / 2; 62 } else { 63 info->group = 1; 64 info->pair = (page + 1 - dist) / 2; 65 } 66 67 return 0; 68 } 69 70 static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd, 71 const struct mtd_pairing_info *info) 72 { 73 int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2; 74 int page = info->pair * 2; 75 int dist = 3; 76 77 if (!info->group && !info->pair) 78 return 0; 79 80 if (info->pair == lastpair && info->group) 81 dist = 2; 82 83 if (!info->group) 84 page--; 85 else if (info->pair) 86 page += dist - 1; 87 88 if (page >= mtd->erasesize / mtd->writesize) 89 return -EINVAL; 90 91 return page; 92 } 93 94 const struct mtd_pairing_scheme dist3_pairing_scheme = { 95 .ngroups = 2, 96 .get_info = nand_pairing_dist3_get_info, 97 .get_wunit = nand_pairing_dist3_get_wunit, 98 }; 99 100 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len) 101 { 102 int ret = 0; 103 104 /* Start address must align on block boundary */ 105 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) { 106 pr_debug("%s: unaligned address\n", __func__); 107 ret = -EINVAL; 108 } 109 110 /* Length must align on block boundary */ 111 if (len & ((1ULL << chip->phys_erase_shift) - 1)) { 112 pr_debug("%s: length not block aligned\n", __func__); 113 ret = -EINVAL; 114 } 115 116 return ret; 117 } 118 119 /** 120 * nand_extract_bits - Copy unaligned bits from one buffer to another one 121 * @dst: destination buffer 122 * @dst_off: bit offset at which the writing starts 123 * @src: source buffer 124 * @src_off: bit offset at which the reading starts 125 * @nbits: number of bits to copy from @src to @dst 126 * 127 * Copy bits from one memory region to another (overlap authorized). 128 */ 129 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, 130 unsigned int src_off, unsigned int nbits) 131 { 132 unsigned int tmp, n; 133 134 dst += dst_off / 8; 135 dst_off %= 8; 136 src += src_off / 8; 137 src_off %= 8; 138 139 while (nbits) { 140 n = min3(8 - dst_off, 8 - src_off, nbits); 141 142 tmp = (*src >> src_off) & GENMASK(n - 1, 0); 143 *dst &= ~GENMASK(n - 1 + dst_off, dst_off); 144 *dst |= tmp << dst_off; 145 146 dst_off += n; 147 if (dst_off >= 8) { 148 dst++; 149 dst_off -= 8; 150 } 151 152 src_off += n; 153 if (src_off >= 8) { 154 src++; 155 src_off -= 8; 156 } 157 158 nbits -= n; 159 } 160 } 161 EXPORT_SYMBOL_GPL(nand_extract_bits); 162 163 /** 164 * nand_select_target() - Select a NAND target (A.K.A. die) 165 * @chip: NAND chip object 166 * @cs: the CS line to select. Note that this CS id is always from the chip 167 * PoV, not the controller one 168 * 169 * Select a NAND target so that further operations executed on @chip go to the 170 * selected NAND target. 171 */ 172 void nand_select_target(struct nand_chip *chip, unsigned int cs) 173 { 174 /* 175 * cs should always lie between 0 and nanddev_ntargets(), when that's 176 * not the case it's a bug and the caller should be fixed. 177 */ 178 if (WARN_ON(cs > nanddev_ntargets(&chip->base))) 179 return; 180 181 chip->cur_cs = cs; 182 183 if (chip->legacy.select_chip) 184 chip->legacy.select_chip(chip, cs); 185 } 186 EXPORT_SYMBOL_GPL(nand_select_target); 187 188 /** 189 * nand_deselect_target() - Deselect the currently selected target 190 * @chip: NAND chip object 191 * 192 * Deselect the currently selected NAND target. The result of operations 193 * executed on @chip after the target has been deselected is undefined. 194 */ 195 void nand_deselect_target(struct nand_chip *chip) 196 { 197 if (chip->legacy.select_chip) 198 chip->legacy.select_chip(chip, -1); 199 200 chip->cur_cs = -1; 201 } 202 EXPORT_SYMBOL_GPL(nand_deselect_target); 203 204 /** 205 * nand_release_device - [GENERIC] release chip 206 * @chip: NAND chip object 207 * 208 * Release chip lock and wake up anyone waiting on the device. 209 */ 210 static void nand_release_device(struct nand_chip *chip) 211 { 212 /* Release the controller and the chip */ 213 mutex_unlock(&chip->controller->lock); 214 mutex_unlock(&chip->lock); 215 } 216 217 /** 218 * nand_bbm_get_next_page - Get the next page for bad block markers 219 * @chip: NAND chip object 220 * @page: First page to start checking for bad block marker usage 221 * 222 * Returns an integer that corresponds to the page offset within a block, for 223 * a page that is used to store bad block markers. If no more pages are 224 * available, -EINVAL is returned. 225 */ 226 int nand_bbm_get_next_page(struct nand_chip *chip, int page) 227 { 228 struct mtd_info *mtd = nand_to_mtd(chip); 229 int last_page = ((mtd->erasesize - mtd->writesize) >> 230 chip->page_shift) & chip->pagemask; 231 unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE 232 | NAND_BBM_LASTPAGE; 233 234 if (page == 0 && !(chip->options & bbm_flags)) 235 return 0; 236 if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) 237 return 0; 238 if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) 239 return 1; 240 if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) 241 return last_page; 242 243 return -EINVAL; 244 } 245 246 /** 247 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 248 * @chip: NAND chip object 249 * @ofs: offset from device start 250 * 251 * Check, if the block is bad. 252 */ 253 static int nand_block_bad(struct nand_chip *chip, loff_t ofs) 254 { 255 int first_page, page_offset; 256 int res; 257 u8 bad; 258 259 first_page = (int)(ofs >> chip->page_shift) & chip->pagemask; 260 page_offset = nand_bbm_get_next_page(chip, 0); 261 262 while (page_offset >= 0) { 263 res = chip->ecc.read_oob(chip, first_page + page_offset); 264 if (res < 0) 265 return res; 266 267 bad = chip->oob_poi[chip->badblockpos]; 268 269 if (likely(chip->badblockbits == 8)) 270 res = bad != 0xFF; 271 else 272 res = hweight8(bad) < chip->badblockbits; 273 if (res) 274 return res; 275 276 page_offset = nand_bbm_get_next_page(chip, page_offset + 1); 277 } 278 279 return 0; 280 } 281 282 /** 283 * nand_region_is_secured() - Check if the region is secured 284 * @chip: NAND chip object 285 * @offset: Offset of the region to check 286 * @size: Size of the region to check 287 * 288 * Checks if the region is secured by comparing the offset and size with the 289 * list of secure regions obtained from DT. Returns true if the region is 290 * secured else false. 291 */ 292 static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size) 293 { 294 int i; 295 296 /* Skip touching the secure regions if present */ 297 for (i = 0; i < chip->nr_secure_regions; i++) { 298 const struct nand_secure_region *region = &chip->secure_regions[i]; 299 300 if (offset + size <= region->offset || 301 offset >= region->offset + region->size) 302 continue; 303 304 pr_debug("%s: Region 0x%llx - 0x%llx is secured!", 305 __func__, offset, offset + size); 306 307 return true; 308 } 309 310 return false; 311 } 312 313 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) 314 { 315 struct mtd_info *mtd = nand_to_mtd(chip); 316 317 if (chip->options & NAND_NO_BBM_QUIRK) 318 return 0; 319 320 /* Check if the region is secured */ 321 if (nand_region_is_secured(chip, ofs, mtd->erasesize)) 322 return -EIO; 323 324 if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning)) 325 return 0; 326 327 if (chip->legacy.block_bad) 328 return chip->legacy.block_bad(chip, ofs); 329 330 return nand_block_bad(chip, ofs); 331 } 332 333 /** 334 * nand_get_device - [GENERIC] Get chip for selected access 335 * @chip: NAND chip structure 336 * 337 * Lock the device and its controller for exclusive access 338 * 339 * Return: -EBUSY if the chip has been suspended, 0 otherwise 340 */ 341 static int nand_get_device(struct nand_chip *chip) 342 { 343 mutex_lock(&chip->lock); 344 if (chip->suspended) { 345 mutex_unlock(&chip->lock); 346 return -EBUSY; 347 } 348 mutex_lock(&chip->controller->lock); 349 350 return 0; 351 } 352 353 /** 354 * nand_check_wp - [GENERIC] check if the chip is write protected 355 * @chip: NAND chip object 356 * 357 * Check, if the device is write protected. The function expects, that the 358 * device is already selected. 359 */ 360 static int nand_check_wp(struct nand_chip *chip) 361 { 362 u8 status; 363 int ret; 364 365 /* Broken xD cards report WP despite being writable */ 366 if (chip->options & NAND_BROKEN_XD) 367 return 0; 368 369 /* Check the WP bit */ 370 ret = nand_status_op(chip, &status); 371 if (ret) 372 return ret; 373 374 return status & NAND_STATUS_WP ? 0 : 1; 375 } 376 377 /** 378 * nand_fill_oob - [INTERN] Transfer client buffer to oob 379 * @chip: NAND chip object 380 * @oob: oob data buffer 381 * @len: oob data write length 382 * @ops: oob ops structure 383 */ 384 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 385 struct mtd_oob_ops *ops) 386 { 387 struct mtd_info *mtd = nand_to_mtd(chip); 388 int ret; 389 390 /* 391 * Initialise to all 0xFF, to avoid the possibility of left over OOB 392 * data from a previous OOB read. 393 */ 394 memset(chip->oob_poi, 0xff, mtd->oobsize); 395 396 switch (ops->mode) { 397 398 case MTD_OPS_PLACE_OOB: 399 case MTD_OPS_RAW: 400 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 401 return oob + len; 402 403 case MTD_OPS_AUTO_OOB: 404 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi, 405 ops->ooboffs, len); 406 BUG_ON(ret); 407 return oob + len; 408 409 default: 410 BUG(); 411 } 412 return NULL; 413 } 414 415 /** 416 * nand_do_write_oob - [MTD Interface] NAND write out-of-band 417 * @chip: NAND chip object 418 * @to: offset to write to 419 * @ops: oob operation description structure 420 * 421 * NAND write out-of-band. 422 */ 423 static int nand_do_write_oob(struct nand_chip *chip, loff_t to, 424 struct mtd_oob_ops *ops) 425 { 426 struct mtd_info *mtd = nand_to_mtd(chip); 427 int chipnr, page, status, len, ret; 428 429 pr_debug("%s: to = 0x%08x, len = %i\n", 430 __func__, (unsigned int)to, (int)ops->ooblen); 431 432 len = mtd_oobavail(mtd, ops); 433 434 /* Do not allow write past end of page */ 435 if ((ops->ooboffs + ops->ooblen) > len) { 436 pr_debug("%s: attempt to write past end of page\n", 437 __func__); 438 return -EINVAL; 439 } 440 441 /* Check if the region is secured */ 442 if (nand_region_is_secured(chip, to, ops->ooblen)) 443 return -EIO; 444 445 chipnr = (int)(to >> chip->chip_shift); 446 447 /* 448 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one 449 * of my DiskOnChip 2000 test units) will clear the whole data page too 450 * if we don't do this. I have no clue why, but I seem to have 'fixed' 451 * it in the doc2000 driver in August 1999. dwmw2. 452 */ 453 ret = nand_reset(chip, chipnr); 454 if (ret) 455 return ret; 456 457 nand_select_target(chip, chipnr); 458 459 /* Shift to get page */ 460 page = (int)(to >> chip->page_shift); 461 462 /* Check, if it is write protected */ 463 if (nand_check_wp(chip)) { 464 nand_deselect_target(chip); 465 return -EROFS; 466 } 467 468 /* Invalidate the page cache, if we write to the cached page */ 469 if (page == chip->pagecache.page) 470 chip->pagecache.page = -1; 471 472 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); 473 474 if (ops->mode == MTD_OPS_RAW) 475 status = chip->ecc.write_oob_raw(chip, page & chip->pagemask); 476 else 477 status = chip->ecc.write_oob(chip, page & chip->pagemask); 478 479 nand_deselect_target(chip); 480 481 if (status) 482 return status; 483 484 ops->oobretlen = ops->ooblen; 485 486 return 0; 487 } 488 489 /** 490 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker 491 * @chip: NAND chip object 492 * @ofs: offset from device start 493 * 494 * This is the default implementation, which can be overridden by a hardware 495 * specific driver. It provides the details for writing a bad block marker to a 496 * block. 497 */ 498 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs) 499 { 500 struct mtd_info *mtd = nand_to_mtd(chip); 501 struct mtd_oob_ops ops; 502 uint8_t buf[2] = { 0, 0 }; 503 int ret = 0, res, page_offset; 504 505 memset(&ops, 0, sizeof(ops)); 506 ops.oobbuf = buf; 507 ops.ooboffs = chip->badblockpos; 508 if (chip->options & NAND_BUSWIDTH_16) { 509 ops.ooboffs &= ~0x01; 510 ops.len = ops.ooblen = 2; 511 } else { 512 ops.len = ops.ooblen = 1; 513 } 514 ops.mode = MTD_OPS_PLACE_OOB; 515 516 page_offset = nand_bbm_get_next_page(chip, 0); 517 518 while (page_offset >= 0) { 519 res = nand_do_write_oob(chip, 520 ofs + (page_offset * mtd->writesize), 521 &ops); 522 523 if (!ret) 524 ret = res; 525 526 page_offset = nand_bbm_get_next_page(chip, page_offset + 1); 527 } 528 529 return ret; 530 } 531 532 /** 533 * nand_markbad_bbm - mark a block by updating the BBM 534 * @chip: NAND chip object 535 * @ofs: offset of the block to mark bad 536 */ 537 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs) 538 { 539 if (chip->legacy.block_markbad) 540 return chip->legacy.block_markbad(chip, ofs); 541 542 return nand_default_block_markbad(chip, ofs); 543 } 544 545 /** 546 * nand_block_markbad_lowlevel - mark a block bad 547 * @chip: NAND chip object 548 * @ofs: offset from device start 549 * 550 * This function performs the generic NAND bad block marking steps (i.e., bad 551 * block table(s) and/or marker(s)). We only allow the hardware driver to 552 * specify how to write bad block markers to OOB (chip->legacy.block_markbad). 553 * 554 * We try operations in the following order: 555 * 556 * (1) erase the affected block, to allow OOB marker to be written cleanly 557 * (2) write bad block marker to OOB area of affected block (unless flag 558 * NAND_BBT_NO_OOB_BBM is present) 559 * (3) update the BBT 560 * 561 * Note that we retain the first error encountered in (2) or (3), finish the 562 * procedures, and dump the error in the end. 563 */ 564 static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) 565 { 566 struct mtd_info *mtd = nand_to_mtd(chip); 567 int res, ret = 0; 568 569 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) { 570 struct erase_info einfo; 571 572 /* Attempt erase before marking OOB */ 573 memset(&einfo, 0, sizeof(einfo)); 574 einfo.addr = ofs; 575 einfo.len = 1ULL << chip->phys_erase_shift; 576 nand_erase_nand(chip, &einfo, 0); 577 578 /* Write bad block marker to OOB */ 579 ret = nand_get_device(chip); 580 if (ret) 581 return ret; 582 583 ret = nand_markbad_bbm(chip, ofs); 584 nand_release_device(chip); 585 } 586 587 /* Mark block bad in BBT */ 588 if (chip->bbt) { 589 res = nand_markbad_bbt(chip, ofs); 590 if (!ret) 591 ret = res; 592 } 593 594 if (!ret) 595 mtd->ecc_stats.badblocks++; 596 597 return ret; 598 } 599 600 /** 601 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved. 602 * @mtd: MTD device structure 603 * @ofs: offset from device start 604 * 605 * Check if the block is marked as reserved. 606 */ 607 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs) 608 { 609 struct nand_chip *chip = mtd_to_nand(mtd); 610 611 if (!chip->bbt) 612 return 0; 613 /* Return info from the table */ 614 return nand_isreserved_bbt(chip, ofs); 615 } 616 617 /** 618 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 619 * @chip: NAND chip object 620 * @ofs: offset from device start 621 * @allowbbt: 1, if its allowed to access the bbt area 622 * 623 * Check, if the block is bad. Either by reading the bad block table or 624 * calling of the scan function. 625 */ 626 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt) 627 { 628 /* Return info from the table */ 629 if (chip->bbt) 630 return nand_isbad_bbt(chip, ofs, allowbbt); 631 632 return nand_isbad_bbm(chip, ofs); 633 } 634 635 /** 636 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1 637 * @chip: NAND chip structure 638 * @timeout_ms: Timeout in ms 639 * 640 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1. 641 * If that does not happen whitin the specified timeout, -ETIMEDOUT is 642 * returned. 643 * 644 * This helper is intended to be used when the controller does not have access 645 * to the NAND R/B pin. 646 * 647 * Be aware that calling this helper from an ->exec_op() implementation means 648 * ->exec_op() must be re-entrant. 649 * 650 * Return 0 if the NAND chip is ready, a negative error otherwise. 651 */ 652 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) 653 { 654 const struct nand_interface_config *conf; 655 u8 status = 0; 656 int ret; 657 658 if (!nand_has_exec_op(chip)) 659 return -ENOTSUPP; 660 661 /* Wait tWB before polling the STATUS reg. */ 662 conf = nand_get_interface_config(chip); 663 ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max)); 664 665 ret = nand_status_op(chip, NULL); 666 if (ret) 667 return ret; 668 669 /* 670 * +1 below is necessary because if we are now in the last fraction 671 * of jiffy and msecs_to_jiffies is 1 then we will wait only that 672 * small jiffy fraction - possibly leading to false timeout 673 */ 674 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; 675 do { 676 ret = nand_read_data_op(chip, &status, sizeof(status), true, 677 false); 678 if (ret) 679 break; 680 681 if (status & NAND_STATUS_READY) 682 break; 683 684 /* 685 * Typical lowest execution time for a tR on most NANDs is 10us, 686 * use this as polling delay before doing something smarter (ie. 687 * deriving a delay from the timeout value, timeout_ms/ratio). 688 */ 689 udelay(10); 690 } while (time_before(jiffies, timeout_ms)); 691 692 /* 693 * We have to exit READ_STATUS mode in order to read real data on the 694 * bus in case the WAITRDY instruction is preceding a DATA_IN 695 * instruction. 696 */ 697 nand_exit_status_op(chip); 698 699 if (ret) 700 return ret; 701 702 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT; 703 }; 704 EXPORT_SYMBOL_GPL(nand_soft_waitrdy); 705 706 /** 707 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready 708 * @chip: NAND chip structure 709 * @gpiod: GPIO descriptor of R/B pin 710 * @timeout_ms: Timeout in ms 711 * 712 * Poll the R/B GPIO pin until it becomes ready. If that does not happen 713 * whitin the specified timeout, -ETIMEDOUT is returned. 714 * 715 * This helper is intended to be used when the controller has access to the 716 * NAND R/B pin over GPIO. 717 * 718 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise. 719 */ 720 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, 721 unsigned long timeout_ms) 722 { 723 724 /* 725 * Wait until R/B pin indicates chip is ready or timeout occurs. 726 * +1 below is necessary because if we are now in the last fraction 727 * of jiffy and msecs_to_jiffies is 1 then we will wait only that 728 * small jiffy fraction - possibly leading to false timeout. 729 */ 730 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; 731 do { 732 if (gpiod_get_value_cansleep(gpiod)) 733 return 0; 734 735 cond_resched(); 736 } while (time_before(jiffies, timeout_ms)); 737 738 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT; 739 }; 740 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy); 741 742 /** 743 * panic_nand_wait - [GENERIC] wait until the command is done 744 * @chip: NAND chip structure 745 * @timeo: timeout 746 * 747 * Wait for command done. This is a helper function for nand_wait used when 748 * we are in interrupt context. May happen when in panic and trying to write 749 * an oops through mtdoops. 750 */ 751 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo) 752 { 753 int i; 754 for (i = 0; i < timeo; i++) { 755 if (chip->legacy.dev_ready) { 756 if (chip->legacy.dev_ready(chip)) 757 break; 758 } else { 759 int ret; 760 u8 status; 761 762 ret = nand_read_data_op(chip, &status, sizeof(status), 763 true, false); 764 if (ret) 765 return; 766 767 if (status & NAND_STATUS_READY) 768 break; 769 } 770 mdelay(1); 771 } 772 } 773 774 static bool nand_supports_get_features(struct nand_chip *chip, int addr) 775 { 776 return (chip->parameters.supports_set_get_features && 777 test_bit(addr, chip->parameters.get_feature_list)); 778 } 779 780 static bool nand_supports_set_features(struct nand_chip *chip, int addr) 781 { 782 return (chip->parameters.supports_set_get_features && 783 test_bit(addr, chip->parameters.set_feature_list)); 784 } 785 786 /** 787 * nand_reset_interface - Reset data interface and timings 788 * @chip: The NAND chip 789 * @chipnr: Internal die id 790 * 791 * Reset the Data interface and timings to ONFI mode 0. 792 * 793 * Returns 0 for success or negative error code otherwise. 794 */ 795 static int nand_reset_interface(struct nand_chip *chip, int chipnr) 796 { 797 const struct nand_controller_ops *ops = chip->controller->ops; 798 int ret; 799 800 if (!nand_controller_can_setup_interface(chip)) 801 return 0; 802 803 /* 804 * The ONFI specification says: 805 * " 806 * To transition from NV-DDR or NV-DDR2 to the SDR data 807 * interface, the host shall use the Reset (FFh) command 808 * using SDR timing mode 0. A device in any timing mode is 809 * required to recognize Reset (FFh) command issued in SDR 810 * timing mode 0. 811 * " 812 * 813 * Configure the data interface in SDR mode and set the 814 * timings to timing mode 0. 815 */ 816 817 chip->current_interface_config = nand_get_reset_interface_config(); 818 ret = ops->setup_interface(chip, chipnr, 819 chip->current_interface_config); 820 if (ret) 821 pr_err("Failed to configure data interface to SDR timing mode 0\n"); 822 823 return ret; 824 } 825 826 /** 827 * nand_setup_interface - Setup the best data interface and timings 828 * @chip: The NAND chip 829 * @chipnr: Internal die id 830 * 831 * Configure what has been reported to be the best data interface and NAND 832 * timings supported by the chip and the driver. 833 * 834 * Returns 0 for success or negative error code otherwise. 835 */ 836 static int nand_setup_interface(struct nand_chip *chip, int chipnr) 837 { 838 const struct nand_controller_ops *ops = chip->controller->ops; 839 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request; 840 int ret; 841 842 if (!nand_controller_can_setup_interface(chip)) 843 return 0; 844 845 /* 846 * A nand_reset_interface() put both the NAND chip and the NAND 847 * controller in timings mode 0. If the default mode for this chip is 848 * also 0, no need to proceed to the change again. Plus, at probe time, 849 * nand_setup_interface() uses ->set/get_features() which would 850 * fail anyway as the parameter page is not available yet. 851 */ 852 if (!chip->best_interface_config) 853 return 0; 854 855 request = chip->best_interface_config->timings.mode; 856 if (nand_interface_is_sdr(chip->best_interface_config)) 857 request |= ONFI_DATA_INTERFACE_SDR; 858 else 859 request |= ONFI_DATA_INTERFACE_NVDDR; 860 tmode_param[0] = request; 861 862 /* Change the mode on the chip side (if supported by the NAND chip) */ 863 if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { 864 nand_select_target(chip, chipnr); 865 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, 866 tmode_param); 867 nand_deselect_target(chip); 868 if (ret) 869 return ret; 870 } 871 872 /* Change the mode on the controller side */ 873 ret = ops->setup_interface(chip, chipnr, chip->best_interface_config); 874 if (ret) 875 return ret; 876 877 /* Check the mode has been accepted by the chip, if supported */ 878 if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) 879 goto update_interface_config; 880 881 memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN); 882 nand_select_target(chip, chipnr); 883 ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, 884 tmode_param); 885 nand_deselect_target(chip); 886 if (ret) 887 goto err_reset_chip; 888 889 if (request != tmode_param[0]) { 890 pr_warn("%s timing mode %d not acknowledged by the NAND chip\n", 891 nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR", 892 chip->best_interface_config->timings.mode); 893 pr_debug("NAND chip would work in %s timing mode %d\n", 894 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR", 895 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0])); 896 goto err_reset_chip; 897 } 898 899 update_interface_config: 900 chip->current_interface_config = chip->best_interface_config; 901 902 return 0; 903 904 err_reset_chip: 905 /* 906 * Fallback to mode 0 if the chip explicitly did not ack the chosen 907 * timing mode. 908 */ 909 nand_reset_interface(chip, chipnr); 910 nand_select_target(chip, chipnr); 911 nand_reset_op(chip); 912 nand_deselect_target(chip); 913 914 return ret; 915 } 916 917 /** 918 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the 919 * NAND controller and the NAND chip support 920 * @chip: the NAND chip 921 * @iface: the interface configuration (can eventually be updated) 922 * @spec_timings: specific timings, when not fitting the ONFI specification 923 * 924 * If specific timings are provided, use them. Otherwise, retrieve supported 925 * timing modes from ONFI information. 926 */ 927 int nand_choose_best_sdr_timings(struct nand_chip *chip, 928 struct nand_interface_config *iface, 929 struct nand_sdr_timings *spec_timings) 930 { 931 const struct nand_controller_ops *ops = chip->controller->ops; 932 int best_mode = 0, mode, ret = -EOPNOTSUPP; 933 934 iface->type = NAND_SDR_IFACE; 935 936 if (spec_timings) { 937 iface->timings.sdr = *spec_timings; 938 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings); 939 940 /* Verify the controller supports the requested interface */ 941 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 942 iface); 943 if (!ret) { 944 chip->best_interface_config = iface; 945 return ret; 946 } 947 948 /* Fallback to slower modes */ 949 best_mode = iface->timings.mode; 950 } else if (chip->parameters.onfi) { 951 best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1; 952 } 953 954 for (mode = best_mode; mode >= 0; mode--) { 955 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode); 956 957 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 958 iface); 959 if (!ret) { 960 chip->best_interface_config = iface; 961 break; 962 } 963 } 964 965 return ret; 966 } 967 968 /** 969 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the 970 * NAND controller and the NAND chip support 971 * @chip: the NAND chip 972 * @iface: the interface configuration (can eventually be updated) 973 * @spec_timings: specific timings, when not fitting the ONFI specification 974 * 975 * If specific timings are provided, use them. Otherwise, retrieve supported 976 * timing modes from ONFI information. 977 */ 978 int nand_choose_best_nvddr_timings(struct nand_chip *chip, 979 struct nand_interface_config *iface, 980 struct nand_nvddr_timings *spec_timings) 981 { 982 const struct nand_controller_ops *ops = chip->controller->ops; 983 int best_mode = 0, mode, ret = -EOPNOTSUPP; 984 985 iface->type = NAND_NVDDR_IFACE; 986 987 if (spec_timings) { 988 iface->timings.nvddr = *spec_timings; 989 iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings); 990 991 /* Verify the controller supports the requested interface */ 992 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 993 iface); 994 if (!ret) { 995 chip->best_interface_config = iface; 996 return ret; 997 } 998 999 /* Fallback to slower modes */ 1000 best_mode = iface->timings.mode; 1001 } else if (chip->parameters.onfi) { 1002 best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1; 1003 } 1004 1005 for (mode = best_mode; mode >= 0; mode--) { 1006 onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode); 1007 1008 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 1009 iface); 1010 if (!ret) { 1011 chip->best_interface_config = iface; 1012 break; 1013 } 1014 } 1015 1016 return ret; 1017 } 1018 1019 /** 1020 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both 1021 * NAND controller and the NAND chip support 1022 * @chip: the NAND chip 1023 * @iface: the interface configuration (can eventually be updated) 1024 * 1025 * If specific timings are provided, use them. Otherwise, retrieve supported 1026 * timing modes from ONFI information. 1027 */ 1028 static int nand_choose_best_timings(struct nand_chip *chip, 1029 struct nand_interface_config *iface) 1030 { 1031 int ret; 1032 1033 /* Try the fastest timings: NV-DDR */ 1034 ret = nand_choose_best_nvddr_timings(chip, iface, NULL); 1035 if (!ret) 1036 return 0; 1037 1038 /* Fallback to SDR timings otherwise */ 1039 return nand_choose_best_sdr_timings(chip, iface, NULL); 1040 } 1041 1042 /** 1043 * nand_choose_interface_config - find the best data interface and timings 1044 * @chip: The NAND chip 1045 * 1046 * Find the best data interface and NAND timings supported by the chip 1047 * and the driver. Eventually let the NAND manufacturer driver propose his own 1048 * set of timings. 1049 * 1050 * After this function nand_chip->interface_config is initialized with the best 1051 * timing mode available. 1052 * 1053 * Returns 0 for success or negative error code otherwise. 1054 */ 1055 static int nand_choose_interface_config(struct nand_chip *chip) 1056 { 1057 struct nand_interface_config *iface; 1058 int ret; 1059 1060 if (!nand_controller_can_setup_interface(chip)) 1061 return 0; 1062 1063 iface = kzalloc(sizeof(*iface), GFP_KERNEL); 1064 if (!iface) 1065 return -ENOMEM; 1066 1067 if (chip->ops.choose_interface_config) 1068 ret = chip->ops.choose_interface_config(chip, iface); 1069 else 1070 ret = nand_choose_best_timings(chip, iface); 1071 1072 if (ret) 1073 kfree(iface); 1074 1075 return ret; 1076 } 1077 1078 /** 1079 * nand_fill_column_cycles - fill the column cycles of an address 1080 * @chip: The NAND chip 1081 * @addrs: Array of address cycles to fill 1082 * @offset_in_page: The offset in the page 1083 * 1084 * Fills the first or the first two bytes of the @addrs field depending 1085 * on the NAND bus width and the page size. 1086 * 1087 * Returns the number of cycles needed to encode the column, or a negative 1088 * error code in case one of the arguments is invalid. 1089 */ 1090 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs, 1091 unsigned int offset_in_page) 1092 { 1093 struct mtd_info *mtd = nand_to_mtd(chip); 1094 1095 /* Make sure the offset is less than the actual page size. */ 1096 if (offset_in_page > mtd->writesize + mtd->oobsize) 1097 return -EINVAL; 1098 1099 /* 1100 * On small page NANDs, there's a dedicated command to access the OOB 1101 * area, and the column address is relative to the start of the OOB 1102 * area, not the start of the page. Asjust the address accordingly. 1103 */ 1104 if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize) 1105 offset_in_page -= mtd->writesize; 1106 1107 /* 1108 * The offset in page is expressed in bytes, if the NAND bus is 16-bit 1109 * wide, then it must be divided by 2. 1110 */ 1111 if (chip->options & NAND_BUSWIDTH_16) { 1112 if (WARN_ON(offset_in_page % 2)) 1113 return -EINVAL; 1114 1115 offset_in_page /= 2; 1116 } 1117 1118 addrs[0] = offset_in_page; 1119 1120 /* 1121 * Small page NANDs use 1 cycle for the columns, while large page NANDs 1122 * need 2 1123 */ 1124 if (mtd->writesize <= 512) 1125 return 1; 1126 1127 addrs[1] = offset_in_page >> 8; 1128 1129 return 2; 1130 } 1131 1132 static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page, 1133 unsigned int offset_in_page, void *buf, 1134 unsigned int len) 1135 { 1136 const struct nand_interface_config *conf = 1137 nand_get_interface_config(chip); 1138 struct mtd_info *mtd = nand_to_mtd(chip); 1139 u8 addrs[4]; 1140 struct nand_op_instr instrs[] = { 1141 NAND_OP_CMD(NAND_CMD_READ0, 0), 1142 NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)), 1143 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 1144 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1145 NAND_OP_DATA_IN(len, buf, 0), 1146 }; 1147 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1148 int ret; 1149 1150 /* Drop the DATA_IN instruction if len is set to 0. */ 1151 if (!len) 1152 op.ninstrs--; 1153 1154 if (offset_in_page >= mtd->writesize) 1155 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB; 1156 else if (offset_in_page >= 256 && 1157 !(chip->options & NAND_BUSWIDTH_16)) 1158 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1; 1159 1160 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1161 if (ret < 0) 1162 return ret; 1163 1164 addrs[1] = page; 1165 addrs[2] = page >> 8; 1166 1167 if (chip->options & NAND_ROW_ADDR_3) { 1168 addrs[3] = page >> 16; 1169 instrs[1].ctx.addr.naddrs++; 1170 } 1171 1172 return nand_exec_op(chip, &op); 1173 } 1174 1175 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, 1176 unsigned int offset_in_page, void *buf, 1177 unsigned int len) 1178 { 1179 const struct nand_interface_config *conf = 1180 nand_get_interface_config(chip); 1181 u8 addrs[5]; 1182 struct nand_op_instr instrs[] = { 1183 NAND_OP_CMD(NAND_CMD_READ0, 0), 1184 NAND_OP_ADDR(4, addrs, 0), 1185 NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)), 1186 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 1187 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1188 NAND_OP_DATA_IN(len, buf, 0), 1189 }; 1190 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1191 int ret; 1192 1193 /* Drop the DATA_IN instruction if len is set to 0. */ 1194 if (!len) 1195 op.ninstrs--; 1196 1197 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1198 if (ret < 0) 1199 return ret; 1200 1201 addrs[2] = page; 1202 addrs[3] = page >> 8; 1203 1204 if (chip->options & NAND_ROW_ADDR_3) { 1205 addrs[4] = page >> 16; 1206 instrs[1].ctx.addr.naddrs++; 1207 } 1208 1209 return nand_exec_op(chip, &op); 1210 } 1211 1212 /** 1213 * nand_read_page_op - Do a READ PAGE operation 1214 * @chip: The NAND chip 1215 * @page: page to read 1216 * @offset_in_page: offset within the page 1217 * @buf: buffer used to store the data 1218 * @len: length of the buffer 1219 * 1220 * This function issues a READ PAGE operation. 1221 * This function does not select/unselect the CS line. 1222 * 1223 * Returns 0 on success, a negative error code otherwise. 1224 */ 1225 int nand_read_page_op(struct nand_chip *chip, unsigned int page, 1226 unsigned int offset_in_page, void *buf, unsigned int len) 1227 { 1228 struct mtd_info *mtd = nand_to_mtd(chip); 1229 1230 if (len && !buf) 1231 return -EINVAL; 1232 1233 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1234 return -EINVAL; 1235 1236 if (nand_has_exec_op(chip)) { 1237 if (mtd->writesize > 512) 1238 return nand_lp_exec_read_page_op(chip, page, 1239 offset_in_page, buf, 1240 len); 1241 1242 return nand_sp_exec_read_page_op(chip, page, offset_in_page, 1243 buf, len); 1244 } 1245 1246 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page); 1247 if (len) 1248 chip->legacy.read_buf(chip, buf, len); 1249 1250 return 0; 1251 } 1252 EXPORT_SYMBOL_GPL(nand_read_page_op); 1253 1254 /** 1255 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation 1256 * @chip: The NAND chip 1257 * @page: parameter page to read 1258 * @buf: buffer used to store the data 1259 * @len: length of the buffer 1260 * 1261 * This function issues a READ PARAMETER PAGE operation. 1262 * This function does not select/unselect the CS line. 1263 * 1264 * Returns 0 on success, a negative error code otherwise. 1265 */ 1266 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf, 1267 unsigned int len) 1268 { 1269 unsigned int i; 1270 u8 *p = buf; 1271 1272 if (len && !buf) 1273 return -EINVAL; 1274 1275 if (nand_has_exec_op(chip)) { 1276 const struct nand_interface_config *conf = 1277 nand_get_interface_config(chip); 1278 struct nand_op_instr instrs[] = { 1279 NAND_OP_CMD(NAND_CMD_PARAM, 0), 1280 NAND_OP_ADDR(1, &page, 1281 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1282 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 1283 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1284 NAND_OP_8BIT_DATA_IN(len, buf, 0), 1285 }; 1286 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1287 1288 /* Drop the DATA_IN instruction if len is set to 0. */ 1289 if (!len) 1290 op.ninstrs--; 1291 1292 return nand_exec_op(chip, &op); 1293 } 1294 1295 chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1); 1296 for (i = 0; i < len; i++) 1297 p[i] = chip->legacy.read_byte(chip); 1298 1299 return 0; 1300 } 1301 1302 /** 1303 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation 1304 * @chip: The NAND chip 1305 * @offset_in_page: offset within the page 1306 * @buf: buffer used to store the data 1307 * @len: length of the buffer 1308 * @force_8bit: force 8-bit bus access 1309 * 1310 * This function issues a CHANGE READ COLUMN operation. 1311 * This function does not select/unselect the CS line. 1312 * 1313 * Returns 0 on success, a negative error code otherwise. 1314 */ 1315 int nand_change_read_column_op(struct nand_chip *chip, 1316 unsigned int offset_in_page, void *buf, 1317 unsigned int len, bool force_8bit) 1318 { 1319 struct mtd_info *mtd = nand_to_mtd(chip); 1320 1321 if (len && !buf) 1322 return -EINVAL; 1323 1324 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1325 return -EINVAL; 1326 1327 /* Small page NANDs do not support column change. */ 1328 if (mtd->writesize <= 512) 1329 return -ENOTSUPP; 1330 1331 if (nand_has_exec_op(chip)) { 1332 const struct nand_interface_config *conf = 1333 nand_get_interface_config(chip); 1334 u8 addrs[2] = {}; 1335 struct nand_op_instr instrs[] = { 1336 NAND_OP_CMD(NAND_CMD_RNDOUT, 0), 1337 NAND_OP_ADDR(2, addrs, 0), 1338 NAND_OP_CMD(NAND_CMD_RNDOUTSTART, 1339 NAND_COMMON_TIMING_NS(conf, tCCS_min)), 1340 NAND_OP_DATA_IN(len, buf, 0), 1341 }; 1342 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1343 int ret; 1344 1345 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1346 if (ret < 0) 1347 return ret; 1348 1349 /* Drop the DATA_IN instruction if len is set to 0. */ 1350 if (!len) 1351 op.ninstrs--; 1352 1353 instrs[3].ctx.data.force_8bit = force_8bit; 1354 1355 return nand_exec_op(chip, &op); 1356 } 1357 1358 chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1); 1359 if (len) 1360 chip->legacy.read_buf(chip, buf, len); 1361 1362 return 0; 1363 } 1364 EXPORT_SYMBOL_GPL(nand_change_read_column_op); 1365 1366 /** 1367 * nand_read_oob_op - Do a READ OOB operation 1368 * @chip: The NAND chip 1369 * @page: page to read 1370 * @offset_in_oob: offset within the OOB area 1371 * @buf: buffer used to store the data 1372 * @len: length of the buffer 1373 * 1374 * This function issues a READ OOB operation. 1375 * This function does not select/unselect the CS line. 1376 * 1377 * Returns 0 on success, a negative error code otherwise. 1378 */ 1379 int nand_read_oob_op(struct nand_chip *chip, unsigned int page, 1380 unsigned int offset_in_oob, void *buf, unsigned int len) 1381 { 1382 struct mtd_info *mtd = nand_to_mtd(chip); 1383 1384 if (len && !buf) 1385 return -EINVAL; 1386 1387 if (offset_in_oob + len > mtd->oobsize) 1388 return -EINVAL; 1389 1390 if (nand_has_exec_op(chip)) 1391 return nand_read_page_op(chip, page, 1392 mtd->writesize + offset_in_oob, 1393 buf, len); 1394 1395 chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page); 1396 if (len) 1397 chip->legacy.read_buf(chip, buf, len); 1398 1399 return 0; 1400 } 1401 EXPORT_SYMBOL_GPL(nand_read_oob_op); 1402 1403 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, 1404 unsigned int offset_in_page, const void *buf, 1405 unsigned int len, bool prog) 1406 { 1407 const struct nand_interface_config *conf = 1408 nand_get_interface_config(chip); 1409 struct mtd_info *mtd = nand_to_mtd(chip); 1410 u8 addrs[5] = {}; 1411 struct nand_op_instr instrs[] = { 1412 /* 1413 * The first instruction will be dropped if we're dealing 1414 * with a large page NAND and adjusted if we're dealing 1415 * with a small page NAND and the page offset is > 255. 1416 */ 1417 NAND_OP_CMD(NAND_CMD_READ0, 0), 1418 NAND_OP_CMD(NAND_CMD_SEQIN, 0), 1419 NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)), 1420 NAND_OP_DATA_OUT(len, buf, 0), 1421 NAND_OP_CMD(NAND_CMD_PAGEPROG, 1422 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1423 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0), 1424 }; 1425 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1426 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page); 1427 1428 if (naddrs < 0) 1429 return naddrs; 1430 1431 addrs[naddrs++] = page; 1432 addrs[naddrs++] = page >> 8; 1433 if (chip->options & NAND_ROW_ADDR_3) 1434 addrs[naddrs++] = page >> 16; 1435 1436 instrs[2].ctx.addr.naddrs = naddrs; 1437 1438 /* Drop the last two instructions if we're not programming the page. */ 1439 if (!prog) { 1440 op.ninstrs -= 2; 1441 /* Also drop the DATA_OUT instruction if empty. */ 1442 if (!len) 1443 op.ninstrs--; 1444 } 1445 1446 if (mtd->writesize <= 512) { 1447 /* 1448 * Small pages need some more tweaking: we have to adjust the 1449 * first instruction depending on the page offset we're trying 1450 * to access. 1451 */ 1452 if (offset_in_page >= mtd->writesize) 1453 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB; 1454 else if (offset_in_page >= 256 && 1455 !(chip->options & NAND_BUSWIDTH_16)) 1456 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1; 1457 } else { 1458 /* 1459 * Drop the first command if we're dealing with a large page 1460 * NAND. 1461 */ 1462 op.instrs++; 1463 op.ninstrs--; 1464 } 1465 1466 return nand_exec_op(chip, &op); 1467 } 1468 1469 /** 1470 * nand_prog_page_begin_op - starts a PROG PAGE operation 1471 * @chip: The NAND chip 1472 * @page: page to write 1473 * @offset_in_page: offset within the page 1474 * @buf: buffer containing the data to write to the page 1475 * @len: length of the buffer 1476 * 1477 * This function issues the first half of a PROG PAGE operation. 1478 * This function does not select/unselect the CS line. 1479 * 1480 * Returns 0 on success, a negative error code otherwise. 1481 */ 1482 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page, 1483 unsigned int offset_in_page, const void *buf, 1484 unsigned int len) 1485 { 1486 struct mtd_info *mtd = nand_to_mtd(chip); 1487 1488 if (len && !buf) 1489 return -EINVAL; 1490 1491 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1492 return -EINVAL; 1493 1494 if (nand_has_exec_op(chip)) 1495 return nand_exec_prog_page_op(chip, page, offset_in_page, buf, 1496 len, false); 1497 1498 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page); 1499 1500 if (buf) 1501 chip->legacy.write_buf(chip, buf, len); 1502 1503 return 0; 1504 } 1505 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op); 1506 1507 /** 1508 * nand_prog_page_end_op - ends a PROG PAGE operation 1509 * @chip: The NAND chip 1510 * 1511 * This function issues the second half of a PROG PAGE operation. 1512 * This function does not select/unselect the CS line. 1513 * 1514 * Returns 0 on success, a negative error code otherwise. 1515 */ 1516 int nand_prog_page_end_op(struct nand_chip *chip) 1517 { 1518 int ret; 1519 u8 status; 1520 1521 if (nand_has_exec_op(chip)) { 1522 const struct nand_interface_config *conf = 1523 nand_get_interface_config(chip); 1524 struct nand_op_instr instrs[] = { 1525 NAND_OP_CMD(NAND_CMD_PAGEPROG, 1526 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1527 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 1528 0), 1529 }; 1530 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1531 1532 ret = nand_exec_op(chip, &op); 1533 if (ret) 1534 return ret; 1535 1536 ret = nand_status_op(chip, &status); 1537 if (ret) 1538 return ret; 1539 } else { 1540 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1); 1541 ret = chip->legacy.waitfunc(chip); 1542 if (ret < 0) 1543 return ret; 1544 1545 status = ret; 1546 } 1547 1548 if (status & NAND_STATUS_FAIL) 1549 return -EIO; 1550 1551 return 0; 1552 } 1553 EXPORT_SYMBOL_GPL(nand_prog_page_end_op); 1554 1555 /** 1556 * nand_prog_page_op - Do a full PROG PAGE operation 1557 * @chip: The NAND chip 1558 * @page: page to write 1559 * @offset_in_page: offset within the page 1560 * @buf: buffer containing the data to write to the page 1561 * @len: length of the buffer 1562 * 1563 * This function issues a full PROG PAGE operation. 1564 * This function does not select/unselect the CS line. 1565 * 1566 * Returns 0 on success, a negative error code otherwise. 1567 */ 1568 int nand_prog_page_op(struct nand_chip *chip, unsigned int page, 1569 unsigned int offset_in_page, const void *buf, 1570 unsigned int len) 1571 { 1572 struct mtd_info *mtd = nand_to_mtd(chip); 1573 u8 status; 1574 int ret; 1575 1576 if (!len || !buf) 1577 return -EINVAL; 1578 1579 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1580 return -EINVAL; 1581 1582 if (nand_has_exec_op(chip)) { 1583 ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf, 1584 len, true); 1585 if (ret) 1586 return ret; 1587 1588 ret = nand_status_op(chip, &status); 1589 if (ret) 1590 return ret; 1591 } else { 1592 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, 1593 page); 1594 chip->legacy.write_buf(chip, buf, len); 1595 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1); 1596 ret = chip->legacy.waitfunc(chip); 1597 if (ret < 0) 1598 return ret; 1599 1600 status = ret; 1601 } 1602 1603 if (status & NAND_STATUS_FAIL) 1604 return -EIO; 1605 1606 return 0; 1607 } 1608 EXPORT_SYMBOL_GPL(nand_prog_page_op); 1609 1610 /** 1611 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation 1612 * @chip: The NAND chip 1613 * @offset_in_page: offset within the page 1614 * @buf: buffer containing the data to send to the NAND 1615 * @len: length of the buffer 1616 * @force_8bit: force 8-bit bus access 1617 * 1618 * This function issues a CHANGE WRITE COLUMN operation. 1619 * This function does not select/unselect the CS line. 1620 * 1621 * Returns 0 on success, a negative error code otherwise. 1622 */ 1623 int nand_change_write_column_op(struct nand_chip *chip, 1624 unsigned int offset_in_page, 1625 const void *buf, unsigned int len, 1626 bool force_8bit) 1627 { 1628 struct mtd_info *mtd = nand_to_mtd(chip); 1629 1630 if (len && !buf) 1631 return -EINVAL; 1632 1633 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1634 return -EINVAL; 1635 1636 /* Small page NANDs do not support column change. */ 1637 if (mtd->writesize <= 512) 1638 return -ENOTSUPP; 1639 1640 if (nand_has_exec_op(chip)) { 1641 const struct nand_interface_config *conf = 1642 nand_get_interface_config(chip); 1643 u8 addrs[2]; 1644 struct nand_op_instr instrs[] = { 1645 NAND_OP_CMD(NAND_CMD_RNDIN, 0), 1646 NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)), 1647 NAND_OP_DATA_OUT(len, buf, 0), 1648 }; 1649 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1650 int ret; 1651 1652 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1653 if (ret < 0) 1654 return ret; 1655 1656 instrs[2].ctx.data.force_8bit = force_8bit; 1657 1658 /* Drop the DATA_OUT instruction if len is set to 0. */ 1659 if (!len) 1660 op.ninstrs--; 1661 1662 return nand_exec_op(chip, &op); 1663 } 1664 1665 chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1); 1666 if (len) 1667 chip->legacy.write_buf(chip, buf, len); 1668 1669 return 0; 1670 } 1671 EXPORT_SYMBOL_GPL(nand_change_write_column_op); 1672 1673 /** 1674 * nand_readid_op - Do a READID operation 1675 * @chip: The NAND chip 1676 * @addr: address cycle to pass after the READID command 1677 * @buf: buffer used to store the ID 1678 * @len: length of the buffer 1679 * 1680 * This function sends a READID command and reads back the ID returned by the 1681 * NAND. 1682 * This function does not select/unselect the CS line. 1683 * 1684 * Returns 0 on success, a negative error code otherwise. 1685 */ 1686 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, 1687 unsigned int len) 1688 { 1689 unsigned int i; 1690 u8 *id = buf, *ddrbuf = NULL; 1691 1692 if (len && !buf) 1693 return -EINVAL; 1694 1695 if (nand_has_exec_op(chip)) { 1696 const struct nand_interface_config *conf = 1697 nand_get_interface_config(chip); 1698 struct nand_op_instr instrs[] = { 1699 NAND_OP_CMD(NAND_CMD_READID, 0), 1700 NAND_OP_ADDR(1, &addr, 1701 NAND_COMMON_TIMING_NS(conf, tADL_min)), 1702 NAND_OP_8BIT_DATA_IN(len, buf, 0), 1703 }; 1704 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1705 int ret; 1706 1707 /* READ_ID data bytes are received twice in NV-DDR mode */ 1708 if (len && nand_interface_is_nvddr(conf)) { 1709 ddrbuf = kzalloc(len * 2, GFP_KERNEL); 1710 if (!ddrbuf) 1711 return -ENOMEM; 1712 1713 instrs[2].ctx.data.len *= 2; 1714 instrs[2].ctx.data.buf.in = ddrbuf; 1715 } 1716 1717 /* Drop the DATA_IN instruction if len is set to 0. */ 1718 if (!len) 1719 op.ninstrs--; 1720 1721 ret = nand_exec_op(chip, &op); 1722 if (!ret && len && nand_interface_is_nvddr(conf)) { 1723 for (i = 0; i < len; i++) 1724 id[i] = ddrbuf[i * 2]; 1725 } 1726 1727 kfree(ddrbuf); 1728 1729 return ret; 1730 } 1731 1732 chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1); 1733 1734 for (i = 0; i < len; i++) 1735 id[i] = chip->legacy.read_byte(chip); 1736 1737 return 0; 1738 } 1739 EXPORT_SYMBOL_GPL(nand_readid_op); 1740 1741 /** 1742 * nand_status_op - Do a STATUS operation 1743 * @chip: The NAND chip 1744 * @status: out variable to store the NAND status 1745 * 1746 * This function sends a STATUS command and reads back the status returned by 1747 * the NAND. 1748 * This function does not select/unselect the CS line. 1749 * 1750 * Returns 0 on success, a negative error code otherwise. 1751 */ 1752 int nand_status_op(struct nand_chip *chip, u8 *status) 1753 { 1754 if (nand_has_exec_op(chip)) { 1755 const struct nand_interface_config *conf = 1756 nand_get_interface_config(chip); 1757 u8 ddrstatus[2]; 1758 struct nand_op_instr instrs[] = { 1759 NAND_OP_CMD(NAND_CMD_STATUS, 1760 NAND_COMMON_TIMING_NS(conf, tADL_min)), 1761 NAND_OP_8BIT_DATA_IN(1, status, 0), 1762 }; 1763 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1764 int ret; 1765 1766 /* The status data byte will be received twice in NV-DDR mode */ 1767 if (status && nand_interface_is_nvddr(conf)) { 1768 instrs[1].ctx.data.len *= 2; 1769 instrs[1].ctx.data.buf.in = ddrstatus; 1770 } 1771 1772 if (!status) 1773 op.ninstrs--; 1774 1775 ret = nand_exec_op(chip, &op); 1776 if (!ret && status && nand_interface_is_nvddr(conf)) 1777 *status = ddrstatus[0]; 1778 1779 return ret; 1780 } 1781 1782 chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1); 1783 if (status) 1784 *status = chip->legacy.read_byte(chip); 1785 1786 return 0; 1787 } 1788 EXPORT_SYMBOL_GPL(nand_status_op); 1789 1790 /** 1791 * nand_exit_status_op - Exit a STATUS operation 1792 * @chip: The NAND chip 1793 * 1794 * This function sends a READ0 command to cancel the effect of the STATUS 1795 * command to avoid reading only the status until a new read command is sent. 1796 * 1797 * This function does not select/unselect the CS line. 1798 * 1799 * Returns 0 on success, a negative error code otherwise. 1800 */ 1801 int nand_exit_status_op(struct nand_chip *chip) 1802 { 1803 if (nand_has_exec_op(chip)) { 1804 struct nand_op_instr instrs[] = { 1805 NAND_OP_CMD(NAND_CMD_READ0, 0), 1806 }; 1807 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1808 1809 return nand_exec_op(chip, &op); 1810 } 1811 1812 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1); 1813 1814 return 0; 1815 } 1816 1817 /** 1818 * nand_erase_op - Do an erase operation 1819 * @chip: The NAND chip 1820 * @eraseblock: block to erase 1821 * 1822 * This function sends an ERASE command and waits for the NAND to be ready 1823 * before returning. 1824 * This function does not select/unselect the CS line. 1825 * 1826 * Returns 0 on success, a negative error code otherwise. 1827 */ 1828 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) 1829 { 1830 unsigned int page = eraseblock << 1831 (chip->phys_erase_shift - chip->page_shift); 1832 int ret; 1833 u8 status; 1834 1835 if (nand_has_exec_op(chip)) { 1836 const struct nand_interface_config *conf = 1837 nand_get_interface_config(chip); 1838 u8 addrs[3] = { page, page >> 8, page >> 16 }; 1839 struct nand_op_instr instrs[] = { 1840 NAND_OP_CMD(NAND_CMD_ERASE1, 0), 1841 NAND_OP_ADDR(2, addrs, 0), 1842 NAND_OP_CMD(NAND_CMD_ERASE2, 1843 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1844 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max), 1845 0), 1846 }; 1847 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1848 1849 if (chip->options & NAND_ROW_ADDR_3) 1850 instrs[1].ctx.addr.naddrs++; 1851 1852 ret = nand_exec_op(chip, &op); 1853 if (ret) 1854 return ret; 1855 1856 ret = nand_status_op(chip, &status); 1857 if (ret) 1858 return ret; 1859 } else { 1860 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page); 1861 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1); 1862 1863 ret = chip->legacy.waitfunc(chip); 1864 if (ret < 0) 1865 return ret; 1866 1867 status = ret; 1868 } 1869 1870 if (status & NAND_STATUS_FAIL) 1871 return -EIO; 1872 1873 return 0; 1874 } 1875 EXPORT_SYMBOL_GPL(nand_erase_op); 1876 1877 /** 1878 * nand_set_features_op - Do a SET FEATURES operation 1879 * @chip: The NAND chip 1880 * @feature: feature id 1881 * @data: 4 bytes of data 1882 * 1883 * This function sends a SET FEATURES command and waits for the NAND to be 1884 * ready before returning. 1885 * This function does not select/unselect the CS line. 1886 * 1887 * Returns 0 on success, a negative error code otherwise. 1888 */ 1889 static int nand_set_features_op(struct nand_chip *chip, u8 feature, 1890 const void *data) 1891 { 1892 const u8 *params = data; 1893 int i, ret; 1894 1895 if (nand_has_exec_op(chip)) { 1896 const struct nand_interface_config *conf = 1897 nand_get_interface_config(chip); 1898 struct nand_op_instr instrs[] = { 1899 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0), 1900 NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf, 1901 tADL_min)), 1902 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data, 1903 NAND_COMMON_TIMING_NS(conf, 1904 tWB_max)), 1905 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max), 1906 0), 1907 }; 1908 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1909 1910 return nand_exec_op(chip, &op); 1911 } 1912 1913 chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1); 1914 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i) 1915 chip->legacy.write_byte(chip, params[i]); 1916 1917 ret = chip->legacy.waitfunc(chip); 1918 if (ret < 0) 1919 return ret; 1920 1921 if (ret & NAND_STATUS_FAIL) 1922 return -EIO; 1923 1924 return 0; 1925 } 1926 1927 /** 1928 * nand_get_features_op - Do a GET FEATURES operation 1929 * @chip: The NAND chip 1930 * @feature: feature id 1931 * @data: 4 bytes of data 1932 * 1933 * This function sends a GET FEATURES command and waits for the NAND to be 1934 * ready before returning. 1935 * This function does not select/unselect the CS line. 1936 * 1937 * Returns 0 on success, a negative error code otherwise. 1938 */ 1939 static int nand_get_features_op(struct nand_chip *chip, u8 feature, 1940 void *data) 1941 { 1942 u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2]; 1943 int i; 1944 1945 if (nand_has_exec_op(chip)) { 1946 const struct nand_interface_config *conf = 1947 nand_get_interface_config(chip); 1948 struct nand_op_instr instrs[] = { 1949 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0), 1950 NAND_OP_ADDR(1, &feature, 1951 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1952 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max), 1953 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1954 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN, 1955 data, 0), 1956 }; 1957 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1958 int ret; 1959 1960 /* GET_FEATURE data bytes are received twice in NV-DDR mode */ 1961 if (nand_interface_is_nvddr(conf)) { 1962 instrs[3].ctx.data.len *= 2; 1963 instrs[3].ctx.data.buf.in = ddrbuf; 1964 } 1965 1966 ret = nand_exec_op(chip, &op); 1967 if (nand_interface_is_nvddr(conf)) { 1968 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++) 1969 params[i] = ddrbuf[i * 2]; 1970 } 1971 1972 return ret; 1973 } 1974 1975 chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1); 1976 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i) 1977 params[i] = chip->legacy.read_byte(chip); 1978 1979 return 0; 1980 } 1981 1982 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms, 1983 unsigned int delay_ns) 1984 { 1985 if (nand_has_exec_op(chip)) { 1986 struct nand_op_instr instrs[] = { 1987 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms), 1988 PSEC_TO_NSEC(delay_ns)), 1989 }; 1990 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1991 1992 return nand_exec_op(chip, &op); 1993 } 1994 1995 /* Apply delay or wait for ready/busy pin */ 1996 if (!chip->legacy.dev_ready) 1997 udelay(chip->legacy.chip_delay); 1998 else 1999 nand_wait_ready(chip); 2000 2001 return 0; 2002 } 2003 2004 /** 2005 * nand_reset_op - Do a reset operation 2006 * @chip: The NAND chip 2007 * 2008 * This function sends a RESET command and waits for the NAND to be ready 2009 * before returning. 2010 * This function does not select/unselect the CS line. 2011 * 2012 * Returns 0 on success, a negative error code otherwise. 2013 */ 2014 int nand_reset_op(struct nand_chip *chip) 2015 { 2016 if (nand_has_exec_op(chip)) { 2017 const struct nand_interface_config *conf = 2018 nand_get_interface_config(chip); 2019 struct nand_op_instr instrs[] = { 2020 NAND_OP_CMD(NAND_CMD_RESET, 2021 NAND_COMMON_TIMING_NS(conf, tWB_max)), 2022 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max), 2023 0), 2024 }; 2025 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 2026 2027 return nand_exec_op(chip, &op); 2028 } 2029 2030 chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1); 2031 2032 return 0; 2033 } 2034 EXPORT_SYMBOL_GPL(nand_reset_op); 2035 2036 /** 2037 * nand_read_data_op - Read data from the NAND 2038 * @chip: The NAND chip 2039 * @buf: buffer used to store the data 2040 * @len: length of the buffer 2041 * @force_8bit: force 8-bit bus access 2042 * @check_only: do not actually run the command, only checks if the 2043 * controller driver supports it 2044 * 2045 * This function does a raw data read on the bus. Usually used after launching 2046 * another NAND operation like nand_read_page_op(). 2047 * This function does not select/unselect the CS line. 2048 * 2049 * Returns 0 on success, a negative error code otherwise. 2050 */ 2051 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, 2052 bool force_8bit, bool check_only) 2053 { 2054 if (!len || !buf) 2055 return -EINVAL; 2056 2057 if (nand_has_exec_op(chip)) { 2058 const struct nand_interface_config *conf = 2059 nand_get_interface_config(chip); 2060 struct nand_op_instr instrs[] = { 2061 NAND_OP_DATA_IN(len, buf, 0), 2062 }; 2063 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 2064 u8 *ddrbuf = NULL; 2065 int ret, i; 2066 2067 instrs[0].ctx.data.force_8bit = force_8bit; 2068 2069 /* 2070 * Parameter payloads (ID, status, features, etc) do not go 2071 * through the same pipeline as regular data, hence the 2072 * force_8bit flag must be set and this also indicates that in 2073 * case NV-DDR timings are being used the data will be received 2074 * twice. 2075 */ 2076 if (force_8bit && nand_interface_is_nvddr(conf)) { 2077 ddrbuf = kzalloc(len * 2, GFP_KERNEL); 2078 if (!ddrbuf) 2079 return -ENOMEM; 2080 2081 instrs[0].ctx.data.len *= 2; 2082 instrs[0].ctx.data.buf.in = ddrbuf; 2083 } 2084 2085 if (check_only) { 2086 ret = nand_check_op(chip, &op); 2087 kfree(ddrbuf); 2088 return ret; 2089 } 2090 2091 ret = nand_exec_op(chip, &op); 2092 if (!ret && force_8bit && nand_interface_is_nvddr(conf)) { 2093 u8 *dst = buf; 2094 2095 for (i = 0; i < len; i++) 2096 dst[i] = ddrbuf[i * 2]; 2097 } 2098 2099 kfree(ddrbuf); 2100 2101 return ret; 2102 } 2103 2104 if (check_only) 2105 return 0; 2106 2107 if (force_8bit) { 2108 u8 *p = buf; 2109 unsigned int i; 2110 2111 for (i = 0; i < len; i++) 2112 p[i] = chip->legacy.read_byte(chip); 2113 } else { 2114 chip->legacy.read_buf(chip, buf, len); 2115 } 2116 2117 return 0; 2118 } 2119 EXPORT_SYMBOL_GPL(nand_read_data_op); 2120 2121 /** 2122 * nand_write_data_op - Write data from the NAND 2123 * @chip: The NAND chip 2124 * @buf: buffer containing the data to send on the bus 2125 * @len: length of the buffer 2126 * @force_8bit: force 8-bit bus access 2127 * 2128 * This function does a raw data write on the bus. Usually used after launching 2129 * another NAND operation like nand_write_page_begin_op(). 2130 * This function does not select/unselect the CS line. 2131 * 2132 * Returns 0 on success, a negative error code otherwise. 2133 */ 2134 int nand_write_data_op(struct nand_chip *chip, const void *buf, 2135 unsigned int len, bool force_8bit) 2136 { 2137 if (!len || !buf) 2138 return -EINVAL; 2139 2140 if (nand_has_exec_op(chip)) { 2141 struct nand_op_instr instrs[] = { 2142 NAND_OP_DATA_OUT(len, buf, 0), 2143 }; 2144 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 2145 2146 instrs[0].ctx.data.force_8bit = force_8bit; 2147 2148 return nand_exec_op(chip, &op); 2149 } 2150 2151 if (force_8bit) { 2152 const u8 *p = buf; 2153 unsigned int i; 2154 2155 for (i = 0; i < len; i++) 2156 chip->legacy.write_byte(chip, p[i]); 2157 } else { 2158 chip->legacy.write_buf(chip, buf, len); 2159 } 2160 2161 return 0; 2162 } 2163 EXPORT_SYMBOL_GPL(nand_write_data_op); 2164 2165 /** 2166 * struct nand_op_parser_ctx - Context used by the parser 2167 * @instrs: array of all the instructions that must be addressed 2168 * @ninstrs: length of the @instrs array 2169 * @subop: Sub-operation to be passed to the NAND controller 2170 * 2171 * This structure is used by the core to split NAND operations into 2172 * sub-operations that can be handled by the NAND controller. 2173 */ 2174 struct nand_op_parser_ctx { 2175 const struct nand_op_instr *instrs; 2176 unsigned int ninstrs; 2177 struct nand_subop subop; 2178 }; 2179 2180 /** 2181 * nand_op_parser_must_split_instr - Checks if an instruction must be split 2182 * @pat: the parser pattern element that matches @instr 2183 * @instr: pointer to the instruction to check 2184 * @start_offset: this is an in/out parameter. If @instr has already been 2185 * split, then @start_offset is the offset from which to start 2186 * (either an address cycle or an offset in the data buffer). 2187 * Conversely, if the function returns true (ie. instr must be 2188 * split), this parameter is updated to point to the first 2189 * data/address cycle that has not been taken care of. 2190 * 2191 * Some NAND controllers are limited and cannot send X address cycles with a 2192 * unique operation, or cannot read/write more than Y bytes at the same time. 2193 * In this case, split the instruction that does not fit in a single 2194 * controller-operation into two or more chunks. 2195 * 2196 * Returns true if the instruction must be split, false otherwise. 2197 * The @start_offset parameter is also updated to the offset at which the next 2198 * bundle of instruction must start (if an address or a data instruction). 2199 */ 2200 static bool 2201 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat, 2202 const struct nand_op_instr *instr, 2203 unsigned int *start_offset) 2204 { 2205 switch (pat->type) { 2206 case NAND_OP_ADDR_INSTR: 2207 if (!pat->ctx.addr.maxcycles) 2208 break; 2209 2210 if (instr->ctx.addr.naddrs - *start_offset > 2211 pat->ctx.addr.maxcycles) { 2212 *start_offset += pat->ctx.addr.maxcycles; 2213 return true; 2214 } 2215 break; 2216 2217 case NAND_OP_DATA_IN_INSTR: 2218 case NAND_OP_DATA_OUT_INSTR: 2219 if (!pat->ctx.data.maxlen) 2220 break; 2221 2222 if (instr->ctx.data.len - *start_offset > 2223 pat->ctx.data.maxlen) { 2224 *start_offset += pat->ctx.data.maxlen; 2225 return true; 2226 } 2227 break; 2228 2229 default: 2230 break; 2231 } 2232 2233 return false; 2234 } 2235 2236 /** 2237 * nand_op_parser_match_pat - Checks if a pattern matches the instructions 2238 * remaining in the parser context 2239 * @pat: the pattern to test 2240 * @ctx: the parser context structure to match with the pattern @pat 2241 * 2242 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx. 2243 * Returns true if this is the case, false ortherwise. When true is returned, 2244 * @ctx->subop is updated with the set of instructions to be passed to the 2245 * controller driver. 2246 */ 2247 static bool 2248 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat, 2249 struct nand_op_parser_ctx *ctx) 2250 { 2251 unsigned int instr_offset = ctx->subop.first_instr_start_off; 2252 const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs; 2253 const struct nand_op_instr *instr = ctx->subop.instrs; 2254 unsigned int i, ninstrs; 2255 2256 for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) { 2257 /* 2258 * The pattern instruction does not match the operation 2259 * instruction. If the instruction is marked optional in the 2260 * pattern definition, we skip the pattern element and continue 2261 * to the next one. If the element is mandatory, there's no 2262 * match and we can return false directly. 2263 */ 2264 if (instr->type != pat->elems[i].type) { 2265 if (!pat->elems[i].optional) 2266 return false; 2267 2268 continue; 2269 } 2270 2271 /* 2272 * Now check the pattern element constraints. If the pattern is 2273 * not able to handle the whole instruction in a single step, 2274 * we have to split it. 2275 * The last_instr_end_off value comes back updated to point to 2276 * the position where we have to split the instruction (the 2277 * start of the next subop chunk). 2278 */ 2279 if (nand_op_parser_must_split_instr(&pat->elems[i], instr, 2280 &instr_offset)) { 2281 ninstrs++; 2282 i++; 2283 break; 2284 } 2285 2286 instr++; 2287 ninstrs++; 2288 instr_offset = 0; 2289 } 2290 2291 /* 2292 * This can happen if all instructions of a pattern are optional. 2293 * Still, if there's not at least one instruction handled by this 2294 * pattern, this is not a match, and we should try the next one (if 2295 * any). 2296 */ 2297 if (!ninstrs) 2298 return false; 2299 2300 /* 2301 * We had a match on the pattern head, but the pattern may be longer 2302 * than the instructions we're asked to execute. We need to make sure 2303 * there's no mandatory elements in the pattern tail. 2304 */ 2305 for (; i < pat->nelems; i++) { 2306 if (!pat->elems[i].optional) 2307 return false; 2308 } 2309 2310 /* 2311 * We have a match: update the subop structure accordingly and return 2312 * true. 2313 */ 2314 ctx->subop.ninstrs = ninstrs; 2315 ctx->subop.last_instr_end_off = instr_offset; 2316 2317 return true; 2318 } 2319 2320 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) 2321 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx) 2322 { 2323 const struct nand_op_instr *instr; 2324 char *prefix = " "; 2325 unsigned int i; 2326 2327 pr_debug("executing subop (CS%d):\n", ctx->subop.cs); 2328 2329 for (i = 0; i < ctx->ninstrs; i++) { 2330 instr = &ctx->instrs[i]; 2331 2332 if (instr == &ctx->subop.instrs[0]) 2333 prefix = " ->"; 2334 2335 nand_op_trace(prefix, instr); 2336 2337 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1]) 2338 prefix = " "; 2339 } 2340 } 2341 #else 2342 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx) 2343 { 2344 /* NOP */ 2345 } 2346 #endif 2347 2348 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a, 2349 const struct nand_op_parser_ctx *b) 2350 { 2351 if (a->subop.ninstrs < b->subop.ninstrs) 2352 return -1; 2353 else if (a->subop.ninstrs > b->subop.ninstrs) 2354 return 1; 2355 2356 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off) 2357 return -1; 2358 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off) 2359 return 1; 2360 2361 return 0; 2362 } 2363 2364 /** 2365 * nand_op_parser_exec_op - exec_op parser 2366 * @chip: the NAND chip 2367 * @parser: patterns description provided by the controller driver 2368 * @op: the NAND operation to address 2369 * @check_only: when true, the function only checks if @op can be handled but 2370 * does not execute the operation 2371 * 2372 * Helper function designed to ease integration of NAND controller drivers that 2373 * only support a limited set of instruction sequences. The supported sequences 2374 * are described in @parser, and the framework takes care of splitting @op into 2375 * multiple sub-operations (if required) and pass them back to the ->exec() 2376 * callback of the matching pattern if @check_only is set to false. 2377 * 2378 * NAND controller drivers should call this function from their own ->exec_op() 2379 * implementation. 2380 * 2381 * Returns 0 on success, a negative error code otherwise. A failure can be 2382 * caused by an unsupported operation (none of the supported patterns is able 2383 * to handle the requested operation), or an error returned by one of the 2384 * matching pattern->exec() hook. 2385 */ 2386 int nand_op_parser_exec_op(struct nand_chip *chip, 2387 const struct nand_op_parser *parser, 2388 const struct nand_operation *op, bool check_only) 2389 { 2390 struct nand_op_parser_ctx ctx = { 2391 .subop.cs = op->cs, 2392 .subop.instrs = op->instrs, 2393 .instrs = op->instrs, 2394 .ninstrs = op->ninstrs, 2395 }; 2396 unsigned int i; 2397 2398 while (ctx.subop.instrs < op->instrs + op->ninstrs) { 2399 const struct nand_op_parser_pattern *pattern; 2400 struct nand_op_parser_ctx best_ctx; 2401 int ret, best_pattern = -1; 2402 2403 for (i = 0; i < parser->npatterns; i++) { 2404 struct nand_op_parser_ctx test_ctx = ctx; 2405 2406 pattern = &parser->patterns[i]; 2407 if (!nand_op_parser_match_pat(pattern, &test_ctx)) 2408 continue; 2409 2410 if (best_pattern >= 0 && 2411 nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0) 2412 continue; 2413 2414 best_pattern = i; 2415 best_ctx = test_ctx; 2416 } 2417 2418 if (best_pattern < 0) { 2419 pr_debug("->exec_op() parser: pattern not found!\n"); 2420 return -ENOTSUPP; 2421 } 2422 2423 ctx = best_ctx; 2424 nand_op_parser_trace(&ctx); 2425 2426 if (!check_only) { 2427 pattern = &parser->patterns[best_pattern]; 2428 ret = pattern->exec(chip, &ctx.subop); 2429 if (ret) 2430 return ret; 2431 } 2432 2433 /* 2434 * Update the context structure by pointing to the start of the 2435 * next subop. 2436 */ 2437 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs; 2438 if (ctx.subop.last_instr_end_off) 2439 ctx.subop.instrs -= 1; 2440 2441 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off; 2442 } 2443 2444 return 0; 2445 } 2446 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op); 2447 2448 static bool nand_instr_is_data(const struct nand_op_instr *instr) 2449 { 2450 return instr && (instr->type == NAND_OP_DATA_IN_INSTR || 2451 instr->type == NAND_OP_DATA_OUT_INSTR); 2452 } 2453 2454 static bool nand_subop_instr_is_valid(const struct nand_subop *subop, 2455 unsigned int instr_idx) 2456 { 2457 return subop && instr_idx < subop->ninstrs; 2458 } 2459 2460 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop, 2461 unsigned int instr_idx) 2462 { 2463 if (instr_idx) 2464 return 0; 2465 2466 return subop->first_instr_start_off; 2467 } 2468 2469 /** 2470 * nand_subop_get_addr_start_off - Get the start offset in an address array 2471 * @subop: The entire sub-operation 2472 * @instr_idx: Index of the instruction inside the sub-operation 2473 * 2474 * During driver development, one could be tempted to directly use the 2475 * ->addr.addrs field of address instructions. This is wrong as address 2476 * instructions might be split. 2477 * 2478 * Given an address instruction, returns the offset of the first cycle to issue. 2479 */ 2480 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop, 2481 unsigned int instr_idx) 2482 { 2483 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2484 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2485 return 0; 2486 2487 return nand_subop_get_start_off(subop, instr_idx); 2488 } 2489 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off); 2490 2491 /** 2492 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert 2493 * @subop: The entire sub-operation 2494 * @instr_idx: Index of the instruction inside the sub-operation 2495 * 2496 * During driver development, one could be tempted to directly use the 2497 * ->addr->naddrs field of a data instruction. This is wrong as instructions 2498 * might be split. 2499 * 2500 * Given an address instruction, returns the number of address cycle to issue. 2501 */ 2502 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, 2503 unsigned int instr_idx) 2504 { 2505 int start_off, end_off; 2506 2507 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2508 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2509 return 0; 2510 2511 start_off = nand_subop_get_addr_start_off(subop, instr_idx); 2512 2513 if (instr_idx == subop->ninstrs - 1 && 2514 subop->last_instr_end_off) 2515 end_off = subop->last_instr_end_off; 2516 else 2517 end_off = subop->instrs[instr_idx].ctx.addr.naddrs; 2518 2519 return end_off - start_off; 2520 } 2521 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc); 2522 2523 /** 2524 * nand_subop_get_data_start_off - Get the start offset in a data array 2525 * @subop: The entire sub-operation 2526 * @instr_idx: Index of the instruction inside the sub-operation 2527 * 2528 * During driver development, one could be tempted to directly use the 2529 * ->data->buf.{in,out} field of data instructions. This is wrong as data 2530 * instructions might be split. 2531 * 2532 * Given a data instruction, returns the offset to start from. 2533 */ 2534 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop, 2535 unsigned int instr_idx) 2536 { 2537 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2538 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2539 return 0; 2540 2541 return nand_subop_get_start_off(subop, instr_idx); 2542 } 2543 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off); 2544 2545 /** 2546 * nand_subop_get_data_len - Get the number of bytes to retrieve 2547 * @subop: The entire sub-operation 2548 * @instr_idx: Index of the instruction inside the sub-operation 2549 * 2550 * During driver development, one could be tempted to directly use the 2551 * ->data->len field of a data instruction. This is wrong as data instructions 2552 * might be split. 2553 * 2554 * Returns the length of the chunk of data to send/receive. 2555 */ 2556 unsigned int nand_subop_get_data_len(const struct nand_subop *subop, 2557 unsigned int instr_idx) 2558 { 2559 int start_off = 0, end_off; 2560 2561 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2562 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2563 return 0; 2564 2565 start_off = nand_subop_get_data_start_off(subop, instr_idx); 2566 2567 if (instr_idx == subop->ninstrs - 1 && 2568 subop->last_instr_end_off) 2569 end_off = subop->last_instr_end_off; 2570 else 2571 end_off = subop->instrs[instr_idx].ctx.data.len; 2572 2573 return end_off - start_off; 2574 } 2575 EXPORT_SYMBOL_GPL(nand_subop_get_data_len); 2576 2577 /** 2578 * nand_reset - Reset and initialize a NAND device 2579 * @chip: The NAND chip 2580 * @chipnr: Internal die id 2581 * 2582 * Save the timings data structure, then apply SDR timings mode 0 (see 2583 * nand_reset_interface for details), do the reset operation, and apply 2584 * back the previous timings. 2585 * 2586 * Returns 0 on success, a negative error code otherwise. 2587 */ 2588 int nand_reset(struct nand_chip *chip, int chipnr) 2589 { 2590 int ret; 2591 2592 ret = nand_reset_interface(chip, chipnr); 2593 if (ret) 2594 return ret; 2595 2596 /* 2597 * The CS line has to be released before we can apply the new NAND 2598 * interface settings, hence this weird nand_select_target() 2599 * nand_deselect_target() dance. 2600 */ 2601 nand_select_target(chip, chipnr); 2602 ret = nand_reset_op(chip); 2603 nand_deselect_target(chip); 2604 if (ret) 2605 return ret; 2606 2607 ret = nand_setup_interface(chip, chipnr); 2608 if (ret) 2609 return ret; 2610 2611 return 0; 2612 } 2613 EXPORT_SYMBOL_GPL(nand_reset); 2614 2615 /** 2616 * nand_get_features - wrapper to perform a GET_FEATURE 2617 * @chip: NAND chip info structure 2618 * @addr: feature address 2619 * @subfeature_param: the subfeature parameters, a four bytes array 2620 * 2621 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2622 * operation cannot be handled. 2623 */ 2624 int nand_get_features(struct nand_chip *chip, int addr, 2625 u8 *subfeature_param) 2626 { 2627 if (!nand_supports_get_features(chip, addr)) 2628 return -ENOTSUPP; 2629 2630 if (chip->legacy.get_features) 2631 return chip->legacy.get_features(chip, addr, subfeature_param); 2632 2633 return nand_get_features_op(chip, addr, subfeature_param); 2634 } 2635 2636 /** 2637 * nand_set_features - wrapper to perform a SET_FEATURE 2638 * @chip: NAND chip info structure 2639 * @addr: feature address 2640 * @subfeature_param: the subfeature parameters, a four bytes array 2641 * 2642 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2643 * operation cannot be handled. 2644 */ 2645 int nand_set_features(struct nand_chip *chip, int addr, 2646 u8 *subfeature_param) 2647 { 2648 if (!nand_supports_set_features(chip, addr)) 2649 return -ENOTSUPP; 2650 2651 if (chip->legacy.set_features) 2652 return chip->legacy.set_features(chip, addr, subfeature_param); 2653 2654 return nand_set_features_op(chip, addr, subfeature_param); 2655 } 2656 2657 /** 2658 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data 2659 * @buf: buffer to test 2660 * @len: buffer length 2661 * @bitflips_threshold: maximum number of bitflips 2662 * 2663 * Check if a buffer contains only 0xff, which means the underlying region 2664 * has been erased and is ready to be programmed. 2665 * The bitflips_threshold specify the maximum number of bitflips before 2666 * considering the region is not erased. 2667 * Note: The logic of this function has been extracted from the memweight 2668 * implementation, except that nand_check_erased_buf function exit before 2669 * testing the whole buffer if the number of bitflips exceed the 2670 * bitflips_threshold value. 2671 * 2672 * Returns a positive number of bitflips less than or equal to 2673 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2674 * threshold. 2675 */ 2676 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) 2677 { 2678 const unsigned char *bitmap = buf; 2679 int bitflips = 0; 2680 int weight; 2681 2682 for (; len && ((uintptr_t)bitmap) % sizeof(long); 2683 len--, bitmap++) { 2684 weight = hweight8(*bitmap); 2685 bitflips += BITS_PER_BYTE - weight; 2686 if (unlikely(bitflips > bitflips_threshold)) 2687 return -EBADMSG; 2688 } 2689 2690 for (; len >= sizeof(long); 2691 len -= sizeof(long), bitmap += sizeof(long)) { 2692 unsigned long d = *((unsigned long *)bitmap); 2693 if (d == ~0UL) 2694 continue; 2695 weight = hweight_long(d); 2696 bitflips += BITS_PER_LONG - weight; 2697 if (unlikely(bitflips > bitflips_threshold)) 2698 return -EBADMSG; 2699 } 2700 2701 for (; len > 0; len--, bitmap++) { 2702 weight = hweight8(*bitmap); 2703 bitflips += BITS_PER_BYTE - weight; 2704 if (unlikely(bitflips > bitflips_threshold)) 2705 return -EBADMSG; 2706 } 2707 2708 return bitflips; 2709 } 2710 2711 /** 2712 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only 2713 * 0xff data 2714 * @data: data buffer to test 2715 * @datalen: data length 2716 * @ecc: ECC buffer 2717 * @ecclen: ECC length 2718 * @extraoob: extra OOB buffer 2719 * @extraooblen: extra OOB length 2720 * @bitflips_threshold: maximum number of bitflips 2721 * 2722 * Check if a data buffer and its associated ECC and OOB data contains only 2723 * 0xff pattern, which means the underlying region has been erased and is 2724 * ready to be programmed. 2725 * The bitflips_threshold specify the maximum number of bitflips before 2726 * considering the region as not erased. 2727 * 2728 * Note: 2729 * 1/ ECC algorithms are working on pre-defined block sizes which are usually 2730 * different from the NAND page size. When fixing bitflips, ECC engines will 2731 * report the number of errors per chunk, and the NAND core infrastructure 2732 * expect you to return the maximum number of bitflips for the whole page. 2733 * This is why you should always use this function on a single chunk and 2734 * not on the whole page. After checking each chunk you should update your 2735 * max_bitflips value accordingly. 2736 * 2/ When checking for bitflips in erased pages you should not only check 2737 * the payload data but also their associated ECC data, because a user might 2738 * have programmed almost all bits to 1 but a few. In this case, we 2739 * shouldn't consider the chunk as erased, and checking ECC bytes prevent 2740 * this case. 2741 * 3/ The extraoob argument is optional, and should be used if some of your OOB 2742 * data are protected by the ECC engine. 2743 * It could also be used if you support subpages and want to attach some 2744 * extra OOB data to an ECC chunk. 2745 * 2746 * Returns a positive number of bitflips less than or equal to 2747 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2748 * threshold. In case of success, the passed buffers are filled with 0xff. 2749 */ 2750 int nand_check_erased_ecc_chunk(void *data, int datalen, 2751 void *ecc, int ecclen, 2752 void *extraoob, int extraooblen, 2753 int bitflips_threshold) 2754 { 2755 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; 2756 2757 data_bitflips = nand_check_erased_buf(data, datalen, 2758 bitflips_threshold); 2759 if (data_bitflips < 0) 2760 return data_bitflips; 2761 2762 bitflips_threshold -= data_bitflips; 2763 2764 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); 2765 if (ecc_bitflips < 0) 2766 return ecc_bitflips; 2767 2768 bitflips_threshold -= ecc_bitflips; 2769 2770 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, 2771 bitflips_threshold); 2772 if (extraoob_bitflips < 0) 2773 return extraoob_bitflips; 2774 2775 if (data_bitflips) 2776 memset(data, 0xff, datalen); 2777 2778 if (ecc_bitflips) 2779 memset(ecc, 0xff, ecclen); 2780 2781 if (extraoob_bitflips) 2782 memset(extraoob, 0xff, extraooblen); 2783 2784 return data_bitflips + ecc_bitflips + extraoob_bitflips; 2785 } 2786 EXPORT_SYMBOL(nand_check_erased_ecc_chunk); 2787 2788 /** 2789 * nand_read_page_raw_notsupp - dummy read raw page function 2790 * @chip: nand chip info structure 2791 * @buf: buffer to store read data 2792 * @oob_required: caller requires OOB data read to chip->oob_poi 2793 * @page: page number to read 2794 * 2795 * Returns -ENOTSUPP unconditionally. 2796 */ 2797 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, 2798 int oob_required, int page) 2799 { 2800 return -ENOTSUPP; 2801 } 2802 2803 /** 2804 * nand_read_page_raw - [INTERN] read raw page data without ecc 2805 * @chip: nand chip info structure 2806 * @buf: buffer to store read data 2807 * @oob_required: caller requires OOB data read to chip->oob_poi 2808 * @page: page number to read 2809 * 2810 * Not for syndrome calculating ECC controllers, which use a special oob layout. 2811 */ 2812 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, 2813 int page) 2814 { 2815 struct mtd_info *mtd = nand_to_mtd(chip); 2816 int ret; 2817 2818 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize); 2819 if (ret) 2820 return ret; 2821 2822 if (oob_required) { 2823 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, 2824 false, false); 2825 if (ret) 2826 return ret; 2827 } 2828 2829 return 0; 2830 } 2831 EXPORT_SYMBOL(nand_read_page_raw); 2832 2833 /** 2834 * nand_monolithic_read_page_raw - Monolithic page read in raw mode 2835 * @chip: NAND chip info structure 2836 * @buf: buffer to store read data 2837 * @oob_required: caller requires OOB data read to chip->oob_poi 2838 * @page: page number to read 2839 * 2840 * This is a raw page read, ie. without any error detection/correction. 2841 * Monolithic means we are requesting all the relevant data (main plus 2842 * eventually OOB) to be loaded in the NAND cache and sent over the 2843 * bus (from the NAND chip to the NAND controller) in a single 2844 * operation. This is an alternative to nand_read_page_raw(), which 2845 * first reads the main data, and if the OOB data is requested too, 2846 * then reads more data on the bus. 2847 */ 2848 int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf, 2849 int oob_required, int page) 2850 { 2851 struct mtd_info *mtd = nand_to_mtd(chip); 2852 unsigned int size = mtd->writesize; 2853 u8 *read_buf = buf; 2854 int ret; 2855 2856 if (oob_required) { 2857 size += mtd->oobsize; 2858 2859 if (buf != chip->data_buf) 2860 read_buf = nand_get_data_buf(chip); 2861 } 2862 2863 ret = nand_read_page_op(chip, page, 0, read_buf, size); 2864 if (ret) 2865 return ret; 2866 2867 if (buf != chip->data_buf) 2868 memcpy(buf, read_buf, mtd->writesize); 2869 2870 return 0; 2871 } 2872 EXPORT_SYMBOL(nand_monolithic_read_page_raw); 2873 2874 /** 2875 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc 2876 * @chip: nand chip info structure 2877 * @buf: buffer to store read data 2878 * @oob_required: caller requires OOB data read to chip->oob_poi 2879 * @page: page number to read 2880 * 2881 * We need a special oob layout and handling even when OOB isn't used. 2882 */ 2883 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, 2884 int oob_required, int page) 2885 { 2886 struct mtd_info *mtd = nand_to_mtd(chip); 2887 int eccsize = chip->ecc.size; 2888 int eccbytes = chip->ecc.bytes; 2889 uint8_t *oob = chip->oob_poi; 2890 int steps, size, ret; 2891 2892 ret = nand_read_page_op(chip, page, 0, NULL, 0); 2893 if (ret) 2894 return ret; 2895 2896 for (steps = chip->ecc.steps; steps > 0; steps--) { 2897 ret = nand_read_data_op(chip, buf, eccsize, false, false); 2898 if (ret) 2899 return ret; 2900 2901 buf += eccsize; 2902 2903 if (chip->ecc.prepad) { 2904 ret = nand_read_data_op(chip, oob, chip->ecc.prepad, 2905 false, false); 2906 if (ret) 2907 return ret; 2908 2909 oob += chip->ecc.prepad; 2910 } 2911 2912 ret = nand_read_data_op(chip, oob, eccbytes, false, false); 2913 if (ret) 2914 return ret; 2915 2916 oob += eccbytes; 2917 2918 if (chip->ecc.postpad) { 2919 ret = nand_read_data_op(chip, oob, chip->ecc.postpad, 2920 false, false); 2921 if (ret) 2922 return ret; 2923 2924 oob += chip->ecc.postpad; 2925 } 2926 } 2927 2928 size = mtd->oobsize - (oob - chip->oob_poi); 2929 if (size) { 2930 ret = nand_read_data_op(chip, oob, size, false, false); 2931 if (ret) 2932 return ret; 2933 } 2934 2935 return 0; 2936 } 2937 2938 /** 2939 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function 2940 * @chip: nand chip info structure 2941 * @buf: buffer to store read data 2942 * @oob_required: caller requires OOB data read to chip->oob_poi 2943 * @page: page number to read 2944 */ 2945 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf, 2946 int oob_required, int page) 2947 { 2948 struct mtd_info *mtd = nand_to_mtd(chip); 2949 int i, eccsize = chip->ecc.size, ret; 2950 int eccbytes = chip->ecc.bytes; 2951 int eccsteps = chip->ecc.steps; 2952 uint8_t *p = buf; 2953 uint8_t *ecc_calc = chip->ecc.calc_buf; 2954 uint8_t *ecc_code = chip->ecc.code_buf; 2955 unsigned int max_bitflips = 0; 2956 2957 chip->ecc.read_page_raw(chip, buf, 1, page); 2958 2959 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 2960 chip->ecc.calculate(chip, p, &ecc_calc[i]); 2961 2962 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 2963 chip->ecc.total); 2964 if (ret) 2965 return ret; 2966 2967 eccsteps = chip->ecc.steps; 2968 p = buf; 2969 2970 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2971 int stat; 2972 2973 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 2974 if (stat < 0) { 2975 mtd->ecc_stats.failed++; 2976 } else { 2977 mtd->ecc_stats.corrected += stat; 2978 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2979 } 2980 } 2981 return max_bitflips; 2982 } 2983 2984 /** 2985 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function 2986 * @chip: nand chip info structure 2987 * @data_offs: offset of requested data within the page 2988 * @readlen: data length 2989 * @bufpoi: buffer to store read data 2990 * @page: page number to read 2991 */ 2992 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs, 2993 uint32_t readlen, uint8_t *bufpoi, int page) 2994 { 2995 struct mtd_info *mtd = nand_to_mtd(chip); 2996 int start_step, end_step, num_steps, ret; 2997 uint8_t *p; 2998 int data_col_addr, i, gaps = 0; 2999 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 3000 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 3001 int index, section = 0; 3002 unsigned int max_bitflips = 0; 3003 struct mtd_oob_region oobregion = { }; 3004 3005 /* Column address within the page aligned to ECC size (256bytes) */ 3006 start_step = data_offs / chip->ecc.size; 3007 end_step = (data_offs + readlen - 1) / chip->ecc.size; 3008 num_steps = end_step - start_step + 1; 3009 index = start_step * chip->ecc.bytes; 3010 3011 /* Data size aligned to ECC ecc.size */ 3012 datafrag_len = num_steps * chip->ecc.size; 3013 eccfrag_len = num_steps * chip->ecc.bytes; 3014 3015 data_col_addr = start_step * chip->ecc.size; 3016 /* If we read not a page aligned data */ 3017 p = bufpoi + data_col_addr; 3018 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len); 3019 if (ret) 3020 return ret; 3021 3022 /* Calculate ECC */ 3023 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 3024 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]); 3025 3026 /* 3027 * The performance is faster if we position offsets according to 3028 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 3029 */ 3030 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion); 3031 if (ret) 3032 return ret; 3033 3034 if (oobregion.length < eccfrag_len) 3035 gaps = 1; 3036 3037 if (gaps) { 3038 ret = nand_change_read_column_op(chip, mtd->writesize, 3039 chip->oob_poi, mtd->oobsize, 3040 false); 3041 if (ret) 3042 return ret; 3043 } else { 3044 /* 3045 * Send the command to read the particular ECC bytes take care 3046 * about buswidth alignment in read_buf. 3047 */ 3048 aligned_pos = oobregion.offset & ~(busw - 1); 3049 aligned_len = eccfrag_len; 3050 if (oobregion.offset & (busw - 1)) 3051 aligned_len++; 3052 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) & 3053 (busw - 1)) 3054 aligned_len++; 3055 3056 ret = nand_change_read_column_op(chip, 3057 mtd->writesize + aligned_pos, 3058 &chip->oob_poi[aligned_pos], 3059 aligned_len, false); 3060 if (ret) 3061 return ret; 3062 } 3063 3064 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf, 3065 chip->oob_poi, index, eccfrag_len); 3066 if (ret) 3067 return ret; 3068 3069 p = bufpoi + data_col_addr; 3070 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { 3071 int stat; 3072 3073 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i], 3074 &chip->ecc.calc_buf[i]); 3075 if (stat == -EBADMSG && 3076 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3077 /* check for empty pages with bitflips */ 3078 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 3079 &chip->ecc.code_buf[i], 3080 chip->ecc.bytes, 3081 NULL, 0, 3082 chip->ecc.strength); 3083 } 3084 3085 if (stat < 0) { 3086 mtd->ecc_stats.failed++; 3087 } else { 3088 mtd->ecc_stats.corrected += stat; 3089 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3090 } 3091 } 3092 return max_bitflips; 3093 } 3094 3095 /** 3096 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 3097 * @chip: nand chip info structure 3098 * @buf: buffer to store read data 3099 * @oob_required: caller requires OOB data read to chip->oob_poi 3100 * @page: page number to read 3101 * 3102 * Not for syndrome calculating ECC controllers which need a special oob layout. 3103 */ 3104 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf, 3105 int oob_required, int page) 3106 { 3107 struct mtd_info *mtd = nand_to_mtd(chip); 3108 int i, eccsize = chip->ecc.size, ret; 3109 int eccbytes = chip->ecc.bytes; 3110 int eccsteps = chip->ecc.steps; 3111 uint8_t *p = buf; 3112 uint8_t *ecc_calc = chip->ecc.calc_buf; 3113 uint8_t *ecc_code = chip->ecc.code_buf; 3114 unsigned int max_bitflips = 0; 3115 3116 ret = nand_read_page_op(chip, page, 0, NULL, 0); 3117 if (ret) 3118 return ret; 3119 3120 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3121 chip->ecc.hwctl(chip, NAND_ECC_READ); 3122 3123 ret = nand_read_data_op(chip, p, eccsize, false, false); 3124 if (ret) 3125 return ret; 3126 3127 chip->ecc.calculate(chip, p, &ecc_calc[i]); 3128 } 3129 3130 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, 3131 false); 3132 if (ret) 3133 return ret; 3134 3135 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 3136 chip->ecc.total); 3137 if (ret) 3138 return ret; 3139 3140 eccsteps = chip->ecc.steps; 3141 p = buf; 3142 3143 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3144 int stat; 3145 3146 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 3147 if (stat == -EBADMSG && 3148 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3149 /* check for empty pages with bitflips */ 3150 stat = nand_check_erased_ecc_chunk(p, eccsize, 3151 &ecc_code[i], eccbytes, 3152 NULL, 0, 3153 chip->ecc.strength); 3154 } 3155 3156 if (stat < 0) { 3157 mtd->ecc_stats.failed++; 3158 } else { 3159 mtd->ecc_stats.corrected += stat; 3160 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3161 } 3162 } 3163 return max_bitflips; 3164 } 3165 3166 /** 3167 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC 3168 * data read from OOB area 3169 * @chip: nand chip info structure 3170 * @buf: buffer to store read data 3171 * @oob_required: caller requires OOB data read to chip->oob_poi 3172 * @page: page number to read 3173 * 3174 * Hardware ECC for large page chips, which requires the ECC data to be 3175 * extracted from the OOB before the actual data is read. 3176 */ 3177 int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, 3178 int oob_required, int page) 3179 { 3180 struct mtd_info *mtd = nand_to_mtd(chip); 3181 int i, eccsize = chip->ecc.size, ret; 3182 int eccbytes = chip->ecc.bytes; 3183 int eccsteps = chip->ecc.steps; 3184 uint8_t *p = buf; 3185 uint8_t *ecc_code = chip->ecc.code_buf; 3186 unsigned int max_bitflips = 0; 3187 3188 /* Read the OOB area first */ 3189 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); 3190 if (ret) 3191 return ret; 3192 3193 /* Move read cursor to start of page */ 3194 ret = nand_change_read_column_op(chip, 0, NULL, 0, false); 3195 if (ret) 3196 return ret; 3197 3198 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 3199 chip->ecc.total); 3200 if (ret) 3201 return ret; 3202 3203 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3204 int stat; 3205 3206 chip->ecc.hwctl(chip, NAND_ECC_READ); 3207 3208 ret = nand_read_data_op(chip, p, eccsize, false, false); 3209 if (ret) 3210 return ret; 3211 3212 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); 3213 if (stat == -EBADMSG && 3214 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3215 /* check for empty pages with bitflips */ 3216 stat = nand_check_erased_ecc_chunk(p, eccsize, 3217 &ecc_code[i], 3218 eccbytes, NULL, 0, 3219 chip->ecc.strength); 3220 } 3221 3222 if (stat < 0) { 3223 mtd->ecc_stats.failed++; 3224 } else { 3225 mtd->ecc_stats.corrected += stat; 3226 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3227 } 3228 } 3229 return max_bitflips; 3230 } 3231 EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first); 3232 3233 /** 3234 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read 3235 * @chip: nand chip info structure 3236 * @buf: buffer to store read data 3237 * @oob_required: caller requires OOB data read to chip->oob_poi 3238 * @page: page number to read 3239 * 3240 * The hw generator calculates the error syndrome automatically. Therefore we 3241 * need a special oob layout and handling. 3242 */ 3243 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, 3244 int oob_required, int page) 3245 { 3246 struct mtd_info *mtd = nand_to_mtd(chip); 3247 int ret, i, eccsize = chip->ecc.size; 3248 int eccbytes = chip->ecc.bytes; 3249 int eccsteps = chip->ecc.steps; 3250 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad; 3251 uint8_t *p = buf; 3252 uint8_t *oob = chip->oob_poi; 3253 unsigned int max_bitflips = 0; 3254 3255 ret = nand_read_page_op(chip, page, 0, NULL, 0); 3256 if (ret) 3257 return ret; 3258 3259 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3260 int stat; 3261 3262 chip->ecc.hwctl(chip, NAND_ECC_READ); 3263 3264 ret = nand_read_data_op(chip, p, eccsize, false, false); 3265 if (ret) 3266 return ret; 3267 3268 if (chip->ecc.prepad) { 3269 ret = nand_read_data_op(chip, oob, chip->ecc.prepad, 3270 false, false); 3271 if (ret) 3272 return ret; 3273 3274 oob += chip->ecc.prepad; 3275 } 3276 3277 chip->ecc.hwctl(chip, NAND_ECC_READSYN); 3278 3279 ret = nand_read_data_op(chip, oob, eccbytes, false, false); 3280 if (ret) 3281 return ret; 3282 3283 stat = chip->ecc.correct(chip, p, oob, NULL); 3284 3285 oob += eccbytes; 3286 3287 if (chip->ecc.postpad) { 3288 ret = nand_read_data_op(chip, oob, chip->ecc.postpad, 3289 false, false); 3290 if (ret) 3291 return ret; 3292 3293 oob += chip->ecc.postpad; 3294 } 3295 3296 if (stat == -EBADMSG && 3297 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3298 /* check for empty pages with bitflips */ 3299 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 3300 oob - eccpadbytes, 3301 eccpadbytes, 3302 NULL, 0, 3303 chip->ecc.strength); 3304 } 3305 3306 if (stat < 0) { 3307 mtd->ecc_stats.failed++; 3308 } else { 3309 mtd->ecc_stats.corrected += stat; 3310 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3311 } 3312 } 3313 3314 /* Calculate remaining oob bytes */ 3315 i = mtd->oobsize - (oob - chip->oob_poi); 3316 if (i) { 3317 ret = nand_read_data_op(chip, oob, i, false, false); 3318 if (ret) 3319 return ret; 3320 } 3321 3322 return max_bitflips; 3323 } 3324 3325 /** 3326 * nand_transfer_oob - [INTERN] Transfer oob to client buffer 3327 * @chip: NAND chip object 3328 * @oob: oob destination address 3329 * @ops: oob ops structure 3330 * @len: size of oob to transfer 3331 */ 3332 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 3333 struct mtd_oob_ops *ops, size_t len) 3334 { 3335 struct mtd_info *mtd = nand_to_mtd(chip); 3336 int ret; 3337 3338 switch (ops->mode) { 3339 3340 case MTD_OPS_PLACE_OOB: 3341 case MTD_OPS_RAW: 3342 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 3343 return oob + len; 3344 3345 case MTD_OPS_AUTO_OOB: 3346 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi, 3347 ops->ooboffs, len); 3348 BUG_ON(ret); 3349 return oob + len; 3350 3351 default: 3352 BUG(); 3353 } 3354 return NULL; 3355 } 3356 3357 /** 3358 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode 3359 * @chip: NAND chip object 3360 * @retry_mode: the retry mode to use 3361 * 3362 * Some vendors supply a special command to shift the Vt threshold, to be used 3363 * when there are too many bitflips in a page (i.e., ECC error). After setting 3364 * a new threshold, the host should retry reading the page. 3365 */ 3366 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode) 3367 { 3368 pr_debug("setting READ RETRY mode %d\n", retry_mode); 3369 3370 if (retry_mode >= chip->read_retries) 3371 return -EINVAL; 3372 3373 if (!chip->ops.setup_read_retry) 3374 return -EOPNOTSUPP; 3375 3376 return chip->ops.setup_read_retry(chip, retry_mode); 3377 } 3378 3379 static void nand_wait_readrdy(struct nand_chip *chip) 3380 { 3381 const struct nand_interface_config *conf; 3382 3383 if (!(chip->options & NAND_NEED_READRDY)) 3384 return; 3385 3386 conf = nand_get_interface_config(chip); 3387 WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0)); 3388 } 3389 3390 /** 3391 * nand_do_read_ops - [INTERN] Read data with ECC 3392 * @chip: NAND chip object 3393 * @from: offset to read from 3394 * @ops: oob ops structure 3395 * 3396 * Internal function. Called with chip held. 3397 */ 3398 static int nand_do_read_ops(struct nand_chip *chip, loff_t from, 3399 struct mtd_oob_ops *ops) 3400 { 3401 int chipnr, page, realpage, col, bytes, aligned, oob_required; 3402 struct mtd_info *mtd = nand_to_mtd(chip); 3403 int ret = 0; 3404 uint32_t readlen = ops->len; 3405 uint32_t oobreadlen = ops->ooblen; 3406 uint32_t max_oobsize = mtd_oobavail(mtd, ops); 3407 3408 uint8_t *bufpoi, *oob, *buf; 3409 int use_bounce_buf; 3410 unsigned int max_bitflips = 0; 3411 int retry_mode = 0; 3412 bool ecc_fail = false; 3413 3414 /* Check if the region is secured */ 3415 if (nand_region_is_secured(chip, from, readlen)) 3416 return -EIO; 3417 3418 chipnr = (int)(from >> chip->chip_shift); 3419 nand_select_target(chip, chipnr); 3420 3421 realpage = (int)(from >> chip->page_shift); 3422 page = realpage & chip->pagemask; 3423 3424 col = (int)(from & (mtd->writesize - 1)); 3425 3426 buf = ops->datbuf; 3427 oob = ops->oobbuf; 3428 oob_required = oob ? 1 : 0; 3429 3430 while (1) { 3431 struct mtd_ecc_stats ecc_stats = mtd->ecc_stats; 3432 3433 bytes = min(mtd->writesize - col, readlen); 3434 aligned = (bytes == mtd->writesize); 3435 3436 if (!aligned) 3437 use_bounce_buf = 1; 3438 else if (chip->options & NAND_USES_DMA) 3439 use_bounce_buf = !virt_addr_valid(buf) || 3440 !IS_ALIGNED((unsigned long)buf, 3441 chip->buf_align); 3442 else 3443 use_bounce_buf = 0; 3444 3445 /* Is the current page in the buffer? */ 3446 if (realpage != chip->pagecache.page || oob) { 3447 bufpoi = use_bounce_buf ? chip->data_buf : buf; 3448 3449 if (use_bounce_buf && aligned) 3450 pr_debug("%s: using read bounce buffer for buf@%p\n", 3451 __func__, buf); 3452 3453 read_retry: 3454 /* 3455 * Now read the page into the buffer. Absent an error, 3456 * the read methods return max bitflips per ecc step. 3457 */ 3458 if (unlikely(ops->mode == MTD_OPS_RAW)) 3459 ret = chip->ecc.read_page_raw(chip, bufpoi, 3460 oob_required, 3461 page); 3462 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) && 3463 !oob) 3464 ret = chip->ecc.read_subpage(chip, col, bytes, 3465 bufpoi, page); 3466 else 3467 ret = chip->ecc.read_page(chip, bufpoi, 3468 oob_required, page); 3469 if (ret < 0) { 3470 if (use_bounce_buf) 3471 /* Invalidate page cache */ 3472 chip->pagecache.page = -1; 3473 break; 3474 } 3475 3476 /* 3477 * Copy back the data in the initial buffer when reading 3478 * partial pages or when a bounce buffer is required. 3479 */ 3480 if (use_bounce_buf) { 3481 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && 3482 !(mtd->ecc_stats.failed - ecc_stats.failed) && 3483 (ops->mode != MTD_OPS_RAW)) { 3484 chip->pagecache.page = realpage; 3485 chip->pagecache.bitflips = ret; 3486 } else { 3487 /* Invalidate page cache */ 3488 chip->pagecache.page = -1; 3489 } 3490 memcpy(buf, bufpoi + col, bytes); 3491 } 3492 3493 if (unlikely(oob)) { 3494 int toread = min(oobreadlen, max_oobsize); 3495 3496 if (toread) { 3497 oob = nand_transfer_oob(chip, oob, ops, 3498 toread); 3499 oobreadlen -= toread; 3500 } 3501 } 3502 3503 nand_wait_readrdy(chip); 3504 3505 if (mtd->ecc_stats.failed - ecc_stats.failed) { 3506 if (retry_mode + 1 < chip->read_retries) { 3507 retry_mode++; 3508 ret = nand_setup_read_retry(chip, 3509 retry_mode); 3510 if (ret < 0) 3511 break; 3512 3513 /* Reset ecc_stats; retry */ 3514 mtd->ecc_stats = ecc_stats; 3515 goto read_retry; 3516 } else { 3517 /* No more retry modes; real failure */ 3518 ecc_fail = true; 3519 } 3520 } 3521 3522 buf += bytes; 3523 max_bitflips = max_t(unsigned int, max_bitflips, ret); 3524 } else { 3525 memcpy(buf, chip->data_buf + col, bytes); 3526 buf += bytes; 3527 max_bitflips = max_t(unsigned int, max_bitflips, 3528 chip->pagecache.bitflips); 3529 } 3530 3531 readlen -= bytes; 3532 3533 /* Reset to retry mode 0 */ 3534 if (retry_mode) { 3535 ret = nand_setup_read_retry(chip, 0); 3536 if (ret < 0) 3537 break; 3538 retry_mode = 0; 3539 } 3540 3541 if (!readlen) 3542 break; 3543 3544 /* For subsequent reads align to page boundary */ 3545 col = 0; 3546 /* Increment page address */ 3547 realpage++; 3548 3549 page = realpage & chip->pagemask; 3550 /* Check, if we cross a chip boundary */ 3551 if (!page) { 3552 chipnr++; 3553 nand_deselect_target(chip); 3554 nand_select_target(chip, chipnr); 3555 } 3556 } 3557 nand_deselect_target(chip); 3558 3559 ops->retlen = ops->len - (size_t) readlen; 3560 if (oob) 3561 ops->oobretlen = ops->ooblen - oobreadlen; 3562 3563 if (ret < 0) 3564 return ret; 3565 3566 if (ecc_fail) 3567 return -EBADMSG; 3568 3569 return max_bitflips; 3570 } 3571 3572 /** 3573 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function 3574 * @chip: nand chip info structure 3575 * @page: page number to read 3576 */ 3577 int nand_read_oob_std(struct nand_chip *chip, int page) 3578 { 3579 struct mtd_info *mtd = nand_to_mtd(chip); 3580 3581 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); 3582 } 3583 EXPORT_SYMBOL(nand_read_oob_std); 3584 3585 /** 3586 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC 3587 * with syndromes 3588 * @chip: nand chip info structure 3589 * @page: page number to read 3590 */ 3591 static int nand_read_oob_syndrome(struct nand_chip *chip, int page) 3592 { 3593 struct mtd_info *mtd = nand_to_mtd(chip); 3594 int length = mtd->oobsize; 3595 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 3596 int eccsize = chip->ecc.size; 3597 uint8_t *bufpoi = chip->oob_poi; 3598 int i, toread, sndrnd = 0, pos, ret; 3599 3600 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0); 3601 if (ret) 3602 return ret; 3603 3604 for (i = 0; i < chip->ecc.steps; i++) { 3605 if (sndrnd) { 3606 int ret; 3607 3608 pos = eccsize + i * (eccsize + chunk); 3609 if (mtd->writesize > 512) 3610 ret = nand_change_read_column_op(chip, pos, 3611 NULL, 0, 3612 false); 3613 else 3614 ret = nand_read_page_op(chip, page, pos, NULL, 3615 0); 3616 3617 if (ret) 3618 return ret; 3619 } else 3620 sndrnd = 1; 3621 toread = min_t(int, length, chunk); 3622 3623 ret = nand_read_data_op(chip, bufpoi, toread, false, false); 3624 if (ret) 3625 return ret; 3626 3627 bufpoi += toread; 3628 length -= toread; 3629 } 3630 if (length > 0) { 3631 ret = nand_read_data_op(chip, bufpoi, length, false, false); 3632 if (ret) 3633 return ret; 3634 } 3635 3636 return 0; 3637 } 3638 3639 /** 3640 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function 3641 * @chip: nand chip info structure 3642 * @page: page number to write 3643 */ 3644 int nand_write_oob_std(struct nand_chip *chip, int page) 3645 { 3646 struct mtd_info *mtd = nand_to_mtd(chip); 3647 3648 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi, 3649 mtd->oobsize); 3650 } 3651 EXPORT_SYMBOL(nand_write_oob_std); 3652 3653 /** 3654 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC 3655 * with syndrome - only for large page flash 3656 * @chip: nand chip info structure 3657 * @page: page number to write 3658 */ 3659 static int nand_write_oob_syndrome(struct nand_chip *chip, int page) 3660 { 3661 struct mtd_info *mtd = nand_to_mtd(chip); 3662 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 3663 int eccsize = chip->ecc.size, length = mtd->oobsize; 3664 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps; 3665 const uint8_t *bufpoi = chip->oob_poi; 3666 3667 /* 3668 * data-ecc-data-ecc ... ecc-oob 3669 * or 3670 * data-pad-ecc-pad-data-pad .... ecc-pad-oob 3671 */ 3672 if (!chip->ecc.prepad && !chip->ecc.postpad) { 3673 pos = steps * (eccsize + chunk); 3674 steps = 0; 3675 } else 3676 pos = eccsize; 3677 3678 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0); 3679 if (ret) 3680 return ret; 3681 3682 for (i = 0; i < steps; i++) { 3683 if (sndcmd) { 3684 if (mtd->writesize <= 512) { 3685 uint32_t fill = 0xFFFFFFFF; 3686 3687 len = eccsize; 3688 while (len > 0) { 3689 int num = min_t(int, len, 4); 3690 3691 ret = nand_write_data_op(chip, &fill, 3692 num, false); 3693 if (ret) 3694 return ret; 3695 3696 len -= num; 3697 } 3698 } else { 3699 pos = eccsize + i * (eccsize + chunk); 3700 ret = nand_change_write_column_op(chip, pos, 3701 NULL, 0, 3702 false); 3703 if (ret) 3704 return ret; 3705 } 3706 } else 3707 sndcmd = 1; 3708 len = min_t(int, length, chunk); 3709 3710 ret = nand_write_data_op(chip, bufpoi, len, false); 3711 if (ret) 3712 return ret; 3713 3714 bufpoi += len; 3715 length -= len; 3716 } 3717 if (length > 0) { 3718 ret = nand_write_data_op(chip, bufpoi, length, false); 3719 if (ret) 3720 return ret; 3721 } 3722 3723 return nand_prog_page_end_op(chip); 3724 } 3725 3726 /** 3727 * nand_do_read_oob - [INTERN] NAND read out-of-band 3728 * @chip: NAND chip object 3729 * @from: offset to read from 3730 * @ops: oob operations description structure 3731 * 3732 * NAND read out-of-band data from the spare area. 3733 */ 3734 static int nand_do_read_oob(struct nand_chip *chip, loff_t from, 3735 struct mtd_oob_ops *ops) 3736 { 3737 struct mtd_info *mtd = nand_to_mtd(chip); 3738 unsigned int max_bitflips = 0; 3739 int page, realpage, chipnr; 3740 struct mtd_ecc_stats stats; 3741 int readlen = ops->ooblen; 3742 int len; 3743 uint8_t *buf = ops->oobbuf; 3744 int ret = 0; 3745 3746 pr_debug("%s: from = 0x%08Lx, len = %i\n", 3747 __func__, (unsigned long long)from, readlen); 3748 3749 /* Check if the region is secured */ 3750 if (nand_region_is_secured(chip, from, readlen)) 3751 return -EIO; 3752 3753 stats = mtd->ecc_stats; 3754 3755 len = mtd_oobavail(mtd, ops); 3756 3757 chipnr = (int)(from >> chip->chip_shift); 3758 nand_select_target(chip, chipnr); 3759 3760 /* Shift to get page */ 3761 realpage = (int)(from >> chip->page_shift); 3762 page = realpage & chip->pagemask; 3763 3764 while (1) { 3765 if (ops->mode == MTD_OPS_RAW) 3766 ret = chip->ecc.read_oob_raw(chip, page); 3767 else 3768 ret = chip->ecc.read_oob(chip, page); 3769 3770 if (ret < 0) 3771 break; 3772 3773 len = min(len, readlen); 3774 buf = nand_transfer_oob(chip, buf, ops, len); 3775 3776 nand_wait_readrdy(chip); 3777 3778 max_bitflips = max_t(unsigned int, max_bitflips, ret); 3779 3780 readlen -= len; 3781 if (!readlen) 3782 break; 3783 3784 /* Increment page address */ 3785 realpage++; 3786 3787 page = realpage & chip->pagemask; 3788 /* Check, if we cross a chip boundary */ 3789 if (!page) { 3790 chipnr++; 3791 nand_deselect_target(chip); 3792 nand_select_target(chip, chipnr); 3793 } 3794 } 3795 nand_deselect_target(chip); 3796 3797 ops->oobretlen = ops->ooblen - readlen; 3798 3799 if (ret < 0) 3800 return ret; 3801 3802 if (mtd->ecc_stats.failed - stats.failed) 3803 return -EBADMSG; 3804 3805 return max_bitflips; 3806 } 3807 3808 /** 3809 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band 3810 * @mtd: MTD device structure 3811 * @from: offset to read from 3812 * @ops: oob operation description structure 3813 * 3814 * NAND read data and/or out-of-band data. 3815 */ 3816 static int nand_read_oob(struct mtd_info *mtd, loff_t from, 3817 struct mtd_oob_ops *ops) 3818 { 3819 struct nand_chip *chip = mtd_to_nand(mtd); 3820 int ret; 3821 3822 ops->retlen = 0; 3823 3824 if (ops->mode != MTD_OPS_PLACE_OOB && 3825 ops->mode != MTD_OPS_AUTO_OOB && 3826 ops->mode != MTD_OPS_RAW) 3827 return -ENOTSUPP; 3828 3829 ret = nand_get_device(chip); 3830 if (ret) 3831 return ret; 3832 3833 if (!ops->datbuf) 3834 ret = nand_do_read_oob(chip, from, ops); 3835 else 3836 ret = nand_do_read_ops(chip, from, ops); 3837 3838 nand_release_device(chip); 3839 return ret; 3840 } 3841 3842 /** 3843 * nand_write_page_raw_notsupp - dummy raw page write function 3844 * @chip: nand chip info structure 3845 * @buf: data buffer 3846 * @oob_required: must write chip->oob_poi to OOB 3847 * @page: page number to write 3848 * 3849 * Returns -ENOTSUPP unconditionally. 3850 */ 3851 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf, 3852 int oob_required, int page) 3853 { 3854 return -ENOTSUPP; 3855 } 3856 3857 /** 3858 * nand_write_page_raw - [INTERN] raw page write function 3859 * @chip: nand chip info structure 3860 * @buf: data buffer 3861 * @oob_required: must write chip->oob_poi to OOB 3862 * @page: page number to write 3863 * 3864 * Not for syndrome calculating ECC controllers, which use a special oob layout. 3865 */ 3866 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, 3867 int oob_required, int page) 3868 { 3869 struct mtd_info *mtd = nand_to_mtd(chip); 3870 int ret; 3871 3872 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize); 3873 if (ret) 3874 return ret; 3875 3876 if (oob_required) { 3877 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, 3878 false); 3879 if (ret) 3880 return ret; 3881 } 3882 3883 return nand_prog_page_end_op(chip); 3884 } 3885 EXPORT_SYMBOL(nand_write_page_raw); 3886 3887 /** 3888 * nand_monolithic_write_page_raw - Monolithic page write in raw mode 3889 * @chip: NAND chip info structure 3890 * @buf: data buffer to write 3891 * @oob_required: must write chip->oob_poi to OOB 3892 * @page: page number to write 3893 * 3894 * This is a raw page write, ie. without any error detection/correction. 3895 * Monolithic means we are requesting all the relevant data (main plus 3896 * eventually OOB) to be sent over the bus and effectively programmed 3897 * into the NAND chip arrays in a single operation. This is an 3898 * alternative to nand_write_page_raw(), which first sends the main 3899 * data, then eventually send the OOB data by latching more data 3900 * cycles on the NAND bus, and finally sends the program command to 3901 * synchronyze the NAND chip cache. 3902 */ 3903 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf, 3904 int oob_required, int page) 3905 { 3906 struct mtd_info *mtd = nand_to_mtd(chip); 3907 unsigned int size = mtd->writesize; 3908 u8 *write_buf = (u8 *)buf; 3909 3910 if (oob_required) { 3911 size += mtd->oobsize; 3912 3913 if (buf != chip->data_buf) { 3914 write_buf = nand_get_data_buf(chip); 3915 memcpy(write_buf, buf, mtd->writesize); 3916 } 3917 } 3918 3919 return nand_prog_page_op(chip, page, 0, write_buf, size); 3920 } 3921 EXPORT_SYMBOL(nand_monolithic_write_page_raw); 3922 3923 /** 3924 * nand_write_page_raw_syndrome - [INTERN] raw page write function 3925 * @chip: nand chip info structure 3926 * @buf: data buffer 3927 * @oob_required: must write chip->oob_poi to OOB 3928 * @page: page number to write 3929 * 3930 * We need a special oob layout and handling even when ECC isn't checked. 3931 */ 3932 static int nand_write_page_raw_syndrome(struct nand_chip *chip, 3933 const uint8_t *buf, int oob_required, 3934 int page) 3935 { 3936 struct mtd_info *mtd = nand_to_mtd(chip); 3937 int eccsize = chip->ecc.size; 3938 int eccbytes = chip->ecc.bytes; 3939 uint8_t *oob = chip->oob_poi; 3940 int steps, size, ret; 3941 3942 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 3943 if (ret) 3944 return ret; 3945 3946 for (steps = chip->ecc.steps; steps > 0; steps--) { 3947 ret = nand_write_data_op(chip, buf, eccsize, false); 3948 if (ret) 3949 return ret; 3950 3951 buf += eccsize; 3952 3953 if (chip->ecc.prepad) { 3954 ret = nand_write_data_op(chip, oob, chip->ecc.prepad, 3955 false); 3956 if (ret) 3957 return ret; 3958 3959 oob += chip->ecc.prepad; 3960 } 3961 3962 ret = nand_write_data_op(chip, oob, eccbytes, false); 3963 if (ret) 3964 return ret; 3965 3966 oob += eccbytes; 3967 3968 if (chip->ecc.postpad) { 3969 ret = nand_write_data_op(chip, oob, chip->ecc.postpad, 3970 false); 3971 if (ret) 3972 return ret; 3973 3974 oob += chip->ecc.postpad; 3975 } 3976 } 3977 3978 size = mtd->oobsize - (oob - chip->oob_poi); 3979 if (size) { 3980 ret = nand_write_data_op(chip, oob, size, false); 3981 if (ret) 3982 return ret; 3983 } 3984 3985 return nand_prog_page_end_op(chip); 3986 } 3987 /** 3988 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function 3989 * @chip: nand chip info structure 3990 * @buf: data buffer 3991 * @oob_required: must write chip->oob_poi to OOB 3992 * @page: page number to write 3993 */ 3994 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf, 3995 int oob_required, int page) 3996 { 3997 struct mtd_info *mtd = nand_to_mtd(chip); 3998 int i, eccsize = chip->ecc.size, ret; 3999 int eccbytes = chip->ecc.bytes; 4000 int eccsteps = chip->ecc.steps; 4001 uint8_t *ecc_calc = chip->ecc.calc_buf; 4002 const uint8_t *p = buf; 4003 4004 /* Software ECC calculation */ 4005 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 4006 chip->ecc.calculate(chip, p, &ecc_calc[i]); 4007 4008 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 4009 chip->ecc.total); 4010 if (ret) 4011 return ret; 4012 4013 return chip->ecc.write_page_raw(chip, buf, 1, page); 4014 } 4015 4016 /** 4017 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function 4018 * @chip: nand chip info structure 4019 * @buf: data buffer 4020 * @oob_required: must write chip->oob_poi to OOB 4021 * @page: page number to write 4022 */ 4023 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf, 4024 int oob_required, int page) 4025 { 4026 struct mtd_info *mtd = nand_to_mtd(chip); 4027 int i, eccsize = chip->ecc.size, ret; 4028 int eccbytes = chip->ecc.bytes; 4029 int eccsteps = chip->ecc.steps; 4030 uint8_t *ecc_calc = chip->ecc.calc_buf; 4031 const uint8_t *p = buf; 4032 4033 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 4034 if (ret) 4035 return ret; 4036 4037 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 4038 chip->ecc.hwctl(chip, NAND_ECC_WRITE); 4039 4040 ret = nand_write_data_op(chip, p, eccsize, false); 4041 if (ret) 4042 return ret; 4043 4044 chip->ecc.calculate(chip, p, &ecc_calc[i]); 4045 } 4046 4047 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 4048 chip->ecc.total); 4049 if (ret) 4050 return ret; 4051 4052 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false); 4053 if (ret) 4054 return ret; 4055 4056 return nand_prog_page_end_op(chip); 4057 } 4058 4059 4060 /** 4061 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write 4062 * @chip: nand chip info structure 4063 * @offset: column address of subpage within the page 4064 * @data_len: data length 4065 * @buf: data buffer 4066 * @oob_required: must write chip->oob_poi to OOB 4067 * @page: page number to write 4068 */ 4069 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset, 4070 uint32_t data_len, const uint8_t *buf, 4071 int oob_required, int page) 4072 { 4073 struct mtd_info *mtd = nand_to_mtd(chip); 4074 uint8_t *oob_buf = chip->oob_poi; 4075 uint8_t *ecc_calc = chip->ecc.calc_buf; 4076 int ecc_size = chip->ecc.size; 4077 int ecc_bytes = chip->ecc.bytes; 4078 int ecc_steps = chip->ecc.steps; 4079 uint32_t start_step = offset / ecc_size; 4080 uint32_t end_step = (offset + data_len - 1) / ecc_size; 4081 int oob_bytes = mtd->oobsize / ecc_steps; 4082 int step, ret; 4083 4084 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 4085 if (ret) 4086 return ret; 4087 4088 for (step = 0; step < ecc_steps; step++) { 4089 /* configure controller for WRITE access */ 4090 chip->ecc.hwctl(chip, NAND_ECC_WRITE); 4091 4092 /* write data (untouched subpages already masked by 0xFF) */ 4093 ret = nand_write_data_op(chip, buf, ecc_size, false); 4094 if (ret) 4095 return ret; 4096 4097 /* mask ECC of un-touched subpages by padding 0xFF */ 4098 if ((step < start_step) || (step > end_step)) 4099 memset(ecc_calc, 0xff, ecc_bytes); 4100 else 4101 chip->ecc.calculate(chip, buf, ecc_calc); 4102 4103 /* mask OOB of un-touched subpages by padding 0xFF */ 4104 /* if oob_required, preserve OOB metadata of written subpage */ 4105 if (!oob_required || (step < start_step) || (step > end_step)) 4106 memset(oob_buf, 0xff, oob_bytes); 4107 4108 buf += ecc_size; 4109 ecc_calc += ecc_bytes; 4110 oob_buf += oob_bytes; 4111 } 4112 4113 /* copy calculated ECC for whole page to chip->buffer->oob */ 4114 /* this include masked-value(0xFF) for unwritten subpages */ 4115 ecc_calc = chip->ecc.calc_buf; 4116 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 4117 chip->ecc.total); 4118 if (ret) 4119 return ret; 4120 4121 /* write OOB buffer to NAND device */ 4122 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false); 4123 if (ret) 4124 return ret; 4125 4126 return nand_prog_page_end_op(chip); 4127 } 4128 4129 4130 /** 4131 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write 4132 * @chip: nand chip info structure 4133 * @buf: data buffer 4134 * @oob_required: must write chip->oob_poi to OOB 4135 * @page: page number to write 4136 * 4137 * The hw generator calculates the error syndrome automatically. Therefore we 4138 * need a special oob layout and handling. 4139 */ 4140 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf, 4141 int oob_required, int page) 4142 { 4143 struct mtd_info *mtd = nand_to_mtd(chip); 4144 int i, eccsize = chip->ecc.size; 4145 int eccbytes = chip->ecc.bytes; 4146 int eccsteps = chip->ecc.steps; 4147 const uint8_t *p = buf; 4148 uint8_t *oob = chip->oob_poi; 4149 int ret; 4150 4151 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 4152 if (ret) 4153 return ret; 4154 4155 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 4156 chip->ecc.hwctl(chip, NAND_ECC_WRITE); 4157 4158 ret = nand_write_data_op(chip, p, eccsize, false); 4159 if (ret) 4160 return ret; 4161 4162 if (chip->ecc.prepad) { 4163 ret = nand_write_data_op(chip, oob, chip->ecc.prepad, 4164 false); 4165 if (ret) 4166 return ret; 4167 4168 oob += chip->ecc.prepad; 4169 } 4170 4171 chip->ecc.calculate(chip, p, oob); 4172 4173 ret = nand_write_data_op(chip, oob, eccbytes, false); 4174 if (ret) 4175 return ret; 4176 4177 oob += eccbytes; 4178 4179 if (chip->ecc.postpad) { 4180 ret = nand_write_data_op(chip, oob, chip->ecc.postpad, 4181 false); 4182 if (ret) 4183 return ret; 4184 4185 oob += chip->ecc.postpad; 4186 } 4187 } 4188 4189 /* Calculate remaining oob bytes */ 4190 i = mtd->oobsize - (oob - chip->oob_poi); 4191 if (i) { 4192 ret = nand_write_data_op(chip, oob, i, false); 4193 if (ret) 4194 return ret; 4195 } 4196 4197 return nand_prog_page_end_op(chip); 4198 } 4199 4200 /** 4201 * nand_write_page - write one page 4202 * @chip: NAND chip descriptor 4203 * @offset: address offset within the page 4204 * @data_len: length of actual data to be written 4205 * @buf: the data to write 4206 * @oob_required: must write chip->oob_poi to OOB 4207 * @page: page number to write 4208 * @raw: use _raw version of write_page 4209 */ 4210 static int nand_write_page(struct nand_chip *chip, uint32_t offset, 4211 int data_len, const uint8_t *buf, int oob_required, 4212 int page, int raw) 4213 { 4214 struct mtd_info *mtd = nand_to_mtd(chip); 4215 int status, subpage; 4216 4217 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && 4218 chip->ecc.write_subpage) 4219 subpage = offset || (data_len < mtd->writesize); 4220 else 4221 subpage = 0; 4222 4223 if (unlikely(raw)) 4224 status = chip->ecc.write_page_raw(chip, buf, oob_required, 4225 page); 4226 else if (subpage) 4227 status = chip->ecc.write_subpage(chip, offset, data_len, buf, 4228 oob_required, page); 4229 else 4230 status = chip->ecc.write_page(chip, buf, oob_required, page); 4231 4232 if (status < 0) 4233 return status; 4234 4235 return 0; 4236 } 4237 4238 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) 4239 4240 /** 4241 * nand_do_write_ops - [INTERN] NAND write with ECC 4242 * @chip: NAND chip object 4243 * @to: offset to write to 4244 * @ops: oob operations description structure 4245 * 4246 * NAND write with ECC. 4247 */ 4248 static int nand_do_write_ops(struct nand_chip *chip, loff_t to, 4249 struct mtd_oob_ops *ops) 4250 { 4251 struct mtd_info *mtd = nand_to_mtd(chip); 4252 int chipnr, realpage, page, column; 4253 uint32_t writelen = ops->len; 4254 4255 uint32_t oobwritelen = ops->ooblen; 4256 uint32_t oobmaxlen = mtd_oobavail(mtd, ops); 4257 4258 uint8_t *oob = ops->oobbuf; 4259 uint8_t *buf = ops->datbuf; 4260 int ret; 4261 int oob_required = oob ? 1 : 0; 4262 4263 ops->retlen = 0; 4264 if (!writelen) 4265 return 0; 4266 4267 /* Reject writes, which are not page aligned */ 4268 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 4269 pr_notice("%s: attempt to write non page aligned data\n", 4270 __func__); 4271 return -EINVAL; 4272 } 4273 4274 /* Check if the region is secured */ 4275 if (nand_region_is_secured(chip, to, writelen)) 4276 return -EIO; 4277 4278 column = to & (mtd->writesize - 1); 4279 4280 chipnr = (int)(to >> chip->chip_shift); 4281 nand_select_target(chip, chipnr); 4282 4283 /* Check, if it is write protected */ 4284 if (nand_check_wp(chip)) { 4285 ret = -EIO; 4286 goto err_out; 4287 } 4288 4289 realpage = (int)(to >> chip->page_shift); 4290 page = realpage & chip->pagemask; 4291 4292 /* Invalidate the page cache, when we write to the cached page */ 4293 if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) && 4294 ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len)) 4295 chip->pagecache.page = -1; 4296 4297 /* Don't allow multipage oob writes with offset */ 4298 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) { 4299 ret = -EINVAL; 4300 goto err_out; 4301 } 4302 4303 while (1) { 4304 int bytes = mtd->writesize; 4305 uint8_t *wbuf = buf; 4306 int use_bounce_buf; 4307 int part_pagewr = (column || writelen < mtd->writesize); 4308 4309 if (part_pagewr) 4310 use_bounce_buf = 1; 4311 else if (chip->options & NAND_USES_DMA) 4312 use_bounce_buf = !virt_addr_valid(buf) || 4313 !IS_ALIGNED((unsigned long)buf, 4314 chip->buf_align); 4315 else 4316 use_bounce_buf = 0; 4317 4318 /* 4319 * Copy the data from the initial buffer when doing partial page 4320 * writes or when a bounce buffer is required. 4321 */ 4322 if (use_bounce_buf) { 4323 pr_debug("%s: using write bounce buffer for buf@%p\n", 4324 __func__, buf); 4325 if (part_pagewr) 4326 bytes = min_t(int, bytes - column, writelen); 4327 wbuf = nand_get_data_buf(chip); 4328 memset(wbuf, 0xff, mtd->writesize); 4329 memcpy(&wbuf[column], buf, bytes); 4330 } 4331 4332 if (unlikely(oob)) { 4333 size_t len = min(oobwritelen, oobmaxlen); 4334 oob = nand_fill_oob(chip, oob, len, ops); 4335 oobwritelen -= len; 4336 } else { 4337 /* We still need to erase leftover OOB data */ 4338 memset(chip->oob_poi, 0xff, mtd->oobsize); 4339 } 4340 4341 ret = nand_write_page(chip, column, bytes, wbuf, 4342 oob_required, page, 4343 (ops->mode == MTD_OPS_RAW)); 4344 if (ret) 4345 break; 4346 4347 writelen -= bytes; 4348 if (!writelen) 4349 break; 4350 4351 column = 0; 4352 buf += bytes; 4353 realpage++; 4354 4355 page = realpage & chip->pagemask; 4356 /* Check, if we cross a chip boundary */ 4357 if (!page) { 4358 chipnr++; 4359 nand_deselect_target(chip); 4360 nand_select_target(chip, chipnr); 4361 } 4362 } 4363 4364 ops->retlen = ops->len - writelen; 4365 if (unlikely(oob)) 4366 ops->oobretlen = ops->ooblen; 4367 4368 err_out: 4369 nand_deselect_target(chip); 4370 return ret; 4371 } 4372 4373 /** 4374 * panic_nand_write - [MTD Interface] NAND write with ECC 4375 * @mtd: MTD device structure 4376 * @to: offset to write to 4377 * @len: number of bytes to write 4378 * @retlen: pointer to variable to store the number of written bytes 4379 * @buf: the data to write 4380 * 4381 * NAND write with ECC. Used when performing writes in interrupt context, this 4382 * may for example be called by mtdoops when writing an oops while in panic. 4383 */ 4384 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, 4385 size_t *retlen, const uint8_t *buf) 4386 { 4387 struct nand_chip *chip = mtd_to_nand(mtd); 4388 int chipnr = (int)(to >> chip->chip_shift); 4389 struct mtd_oob_ops ops; 4390 int ret; 4391 4392 nand_select_target(chip, chipnr); 4393 4394 /* Wait for the device to get ready */ 4395 panic_nand_wait(chip, 400); 4396 4397 memset(&ops, 0, sizeof(ops)); 4398 ops.len = len; 4399 ops.datbuf = (uint8_t *)buf; 4400 ops.mode = MTD_OPS_PLACE_OOB; 4401 4402 ret = nand_do_write_ops(chip, to, &ops); 4403 4404 *retlen = ops.retlen; 4405 return ret; 4406 } 4407 4408 /** 4409 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 4410 * @mtd: MTD device structure 4411 * @to: offset to write to 4412 * @ops: oob operation description structure 4413 */ 4414 static int nand_write_oob(struct mtd_info *mtd, loff_t to, 4415 struct mtd_oob_ops *ops) 4416 { 4417 struct nand_chip *chip = mtd_to_nand(mtd); 4418 int ret; 4419 4420 ops->retlen = 0; 4421 4422 ret = nand_get_device(chip); 4423 if (ret) 4424 return ret; 4425 4426 switch (ops->mode) { 4427 case MTD_OPS_PLACE_OOB: 4428 case MTD_OPS_AUTO_OOB: 4429 case MTD_OPS_RAW: 4430 break; 4431 4432 default: 4433 goto out; 4434 } 4435 4436 if (!ops->datbuf) 4437 ret = nand_do_write_oob(chip, to, ops); 4438 else 4439 ret = nand_do_write_ops(chip, to, ops); 4440 4441 out: 4442 nand_release_device(chip); 4443 return ret; 4444 } 4445 4446 /** 4447 * nand_erase - [MTD Interface] erase block(s) 4448 * @mtd: MTD device structure 4449 * @instr: erase instruction 4450 * 4451 * Erase one ore more blocks. 4452 */ 4453 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) 4454 { 4455 return nand_erase_nand(mtd_to_nand(mtd), instr, 0); 4456 } 4457 4458 /** 4459 * nand_erase_nand - [INTERN] erase block(s) 4460 * @chip: NAND chip object 4461 * @instr: erase instruction 4462 * @allowbbt: allow erasing the bbt area 4463 * 4464 * Erase one ore more blocks. 4465 */ 4466 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, 4467 int allowbbt) 4468 { 4469 int page, pages_per_block, ret, chipnr; 4470 loff_t len; 4471 4472 pr_debug("%s: start = 0x%012llx, len = %llu\n", 4473 __func__, (unsigned long long)instr->addr, 4474 (unsigned long long)instr->len); 4475 4476 if (check_offs_len(chip, instr->addr, instr->len)) 4477 return -EINVAL; 4478 4479 /* Check if the region is secured */ 4480 if (nand_region_is_secured(chip, instr->addr, instr->len)) 4481 return -EIO; 4482 4483 /* Grab the lock and see if the device is available */ 4484 ret = nand_get_device(chip); 4485 if (ret) 4486 return ret; 4487 4488 /* Shift to get first page */ 4489 page = (int)(instr->addr >> chip->page_shift); 4490 chipnr = (int)(instr->addr >> chip->chip_shift); 4491 4492 /* Calculate pages in each block */ 4493 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); 4494 4495 /* Select the NAND device */ 4496 nand_select_target(chip, chipnr); 4497 4498 /* Check, if it is write protected */ 4499 if (nand_check_wp(chip)) { 4500 pr_debug("%s: device is write protected!\n", 4501 __func__); 4502 ret = -EIO; 4503 goto erase_exit; 4504 } 4505 4506 /* Loop through the pages */ 4507 len = instr->len; 4508 4509 while (len) { 4510 /* Check if we have a bad block, we do not erase bad blocks! */ 4511 if (nand_block_checkbad(chip, ((loff_t) page) << 4512 chip->page_shift, allowbbt)) { 4513 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", 4514 __func__, page); 4515 ret = -EIO; 4516 goto erase_exit; 4517 } 4518 4519 /* 4520 * Invalidate the page cache, if we erase the block which 4521 * contains the current cached page. 4522 */ 4523 if (page <= chip->pagecache.page && chip->pagecache.page < 4524 (page + pages_per_block)) 4525 chip->pagecache.page = -1; 4526 4527 ret = nand_erase_op(chip, (page & chip->pagemask) >> 4528 (chip->phys_erase_shift - chip->page_shift)); 4529 if (ret) { 4530 pr_debug("%s: failed erase, page 0x%08x\n", 4531 __func__, page); 4532 instr->fail_addr = 4533 ((loff_t)page << chip->page_shift); 4534 goto erase_exit; 4535 } 4536 4537 /* Increment page address and decrement length */ 4538 len -= (1ULL << chip->phys_erase_shift); 4539 page += pages_per_block; 4540 4541 /* Check, if we cross a chip boundary */ 4542 if (len && !(page & chip->pagemask)) { 4543 chipnr++; 4544 nand_deselect_target(chip); 4545 nand_select_target(chip, chipnr); 4546 } 4547 } 4548 4549 ret = 0; 4550 erase_exit: 4551 4552 /* Deselect and wake up anyone waiting on the device */ 4553 nand_deselect_target(chip); 4554 nand_release_device(chip); 4555 4556 /* Return more or less happy */ 4557 return ret; 4558 } 4559 4560 /** 4561 * nand_sync - [MTD Interface] sync 4562 * @mtd: MTD device structure 4563 * 4564 * Sync is actually a wait for chip ready function. 4565 */ 4566 static void nand_sync(struct mtd_info *mtd) 4567 { 4568 struct nand_chip *chip = mtd_to_nand(mtd); 4569 4570 pr_debug("%s: called\n", __func__); 4571 4572 /* Grab the lock and see if the device is available */ 4573 WARN_ON(nand_get_device(chip)); 4574 /* Release it and go back */ 4575 nand_release_device(chip); 4576 } 4577 4578 /** 4579 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 4580 * @mtd: MTD device structure 4581 * @offs: offset relative to mtd start 4582 */ 4583 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 4584 { 4585 struct nand_chip *chip = mtd_to_nand(mtd); 4586 int chipnr = (int)(offs >> chip->chip_shift); 4587 int ret; 4588 4589 /* Select the NAND device */ 4590 ret = nand_get_device(chip); 4591 if (ret) 4592 return ret; 4593 4594 nand_select_target(chip, chipnr); 4595 4596 ret = nand_block_checkbad(chip, offs, 0); 4597 4598 nand_deselect_target(chip); 4599 nand_release_device(chip); 4600 4601 return ret; 4602 } 4603 4604 /** 4605 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad 4606 * @mtd: MTD device structure 4607 * @ofs: offset relative to mtd start 4608 */ 4609 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) 4610 { 4611 int ret; 4612 4613 ret = nand_block_isbad(mtd, ofs); 4614 if (ret) { 4615 /* If it was bad already, return success and do nothing */ 4616 if (ret > 0) 4617 return 0; 4618 return ret; 4619 } 4620 4621 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs); 4622 } 4623 4624 /** 4625 * nand_suspend - [MTD Interface] Suspend the NAND flash 4626 * @mtd: MTD device structure 4627 * 4628 * Returns 0 for success or negative error code otherwise. 4629 */ 4630 static int nand_suspend(struct mtd_info *mtd) 4631 { 4632 struct nand_chip *chip = mtd_to_nand(mtd); 4633 int ret = 0; 4634 4635 mutex_lock(&chip->lock); 4636 if (chip->ops.suspend) 4637 ret = chip->ops.suspend(chip); 4638 if (!ret) 4639 chip->suspended = 1; 4640 mutex_unlock(&chip->lock); 4641 4642 return ret; 4643 } 4644 4645 /** 4646 * nand_resume - [MTD Interface] Resume the NAND flash 4647 * @mtd: MTD device structure 4648 */ 4649 static void nand_resume(struct mtd_info *mtd) 4650 { 4651 struct nand_chip *chip = mtd_to_nand(mtd); 4652 4653 mutex_lock(&chip->lock); 4654 if (chip->suspended) { 4655 if (chip->ops.resume) 4656 chip->ops.resume(chip); 4657 chip->suspended = 0; 4658 } else { 4659 pr_err("%s called for a chip which is not in suspended state\n", 4660 __func__); 4661 } 4662 mutex_unlock(&chip->lock); 4663 } 4664 4665 /** 4666 * nand_shutdown - [MTD Interface] Finish the current NAND operation and 4667 * prevent further operations 4668 * @mtd: MTD device structure 4669 */ 4670 static void nand_shutdown(struct mtd_info *mtd) 4671 { 4672 nand_suspend(mtd); 4673 } 4674 4675 /** 4676 * nand_lock - [MTD Interface] Lock the NAND flash 4677 * @mtd: MTD device structure 4678 * @ofs: offset byte address 4679 * @len: number of bytes to lock (must be a multiple of block/page size) 4680 */ 4681 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4682 { 4683 struct nand_chip *chip = mtd_to_nand(mtd); 4684 4685 if (!chip->ops.lock_area) 4686 return -ENOTSUPP; 4687 4688 return chip->ops.lock_area(chip, ofs, len); 4689 } 4690 4691 /** 4692 * nand_unlock - [MTD Interface] Unlock the NAND flash 4693 * @mtd: MTD device structure 4694 * @ofs: offset byte address 4695 * @len: number of bytes to unlock (must be a multiple of block/page size) 4696 */ 4697 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4698 { 4699 struct nand_chip *chip = mtd_to_nand(mtd); 4700 4701 if (!chip->ops.unlock_area) 4702 return -ENOTSUPP; 4703 4704 return chip->ops.unlock_area(chip, ofs, len); 4705 } 4706 4707 /* Set default functions */ 4708 static void nand_set_defaults(struct nand_chip *chip) 4709 { 4710 /* If no controller is provided, use the dummy, legacy one. */ 4711 if (!chip->controller) { 4712 chip->controller = &chip->legacy.dummy_controller; 4713 nand_controller_init(chip->controller); 4714 } 4715 4716 nand_legacy_set_defaults(chip); 4717 4718 if (!chip->buf_align) 4719 chip->buf_align = 1; 4720 } 4721 4722 /* Sanitize ONFI strings so we can safely print them */ 4723 void sanitize_string(uint8_t *s, size_t len) 4724 { 4725 ssize_t i; 4726 4727 /* Null terminate */ 4728 s[len - 1] = 0; 4729 4730 /* Remove non printable chars */ 4731 for (i = 0; i < len - 1; i++) { 4732 if (s[i] < ' ' || s[i] > 127) 4733 s[i] = '?'; 4734 } 4735 4736 /* Remove trailing spaces */ 4737 strim(s); 4738 } 4739 4740 /* 4741 * nand_id_has_period - Check if an ID string has a given wraparound period 4742 * @id_data: the ID string 4743 * @arrlen: the length of the @id_data array 4744 * @period: the period of repitition 4745 * 4746 * Check if an ID string is repeated within a given sequence of bytes at 4747 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a 4748 * period of 3). This is a helper function for nand_id_len(). Returns non-zero 4749 * if the repetition has a period of @period; otherwise, returns zero. 4750 */ 4751 static int nand_id_has_period(u8 *id_data, int arrlen, int period) 4752 { 4753 int i, j; 4754 for (i = 0; i < period; i++) 4755 for (j = i + period; j < arrlen; j += period) 4756 if (id_data[i] != id_data[j]) 4757 return 0; 4758 return 1; 4759 } 4760 4761 /* 4762 * nand_id_len - Get the length of an ID string returned by CMD_READID 4763 * @id_data: the ID string 4764 * @arrlen: the length of the @id_data array 4765 4766 * Returns the length of the ID string, according to known wraparound/trailing 4767 * zero patterns. If no pattern exists, returns the length of the array. 4768 */ 4769 static int nand_id_len(u8 *id_data, int arrlen) 4770 { 4771 int last_nonzero, period; 4772 4773 /* Find last non-zero byte */ 4774 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--) 4775 if (id_data[last_nonzero]) 4776 break; 4777 4778 /* All zeros */ 4779 if (last_nonzero < 0) 4780 return 0; 4781 4782 /* Calculate wraparound period */ 4783 for (period = 1; period < arrlen; period++) 4784 if (nand_id_has_period(id_data, arrlen, period)) 4785 break; 4786 4787 /* There's a repeated pattern */ 4788 if (period < arrlen) 4789 return period; 4790 4791 /* There are trailing zeros */ 4792 if (last_nonzero < arrlen - 1) 4793 return last_nonzero + 1; 4794 4795 /* No pattern detected */ 4796 return arrlen; 4797 } 4798 4799 /* Extract the bits of per cell from the 3rd byte of the extended ID */ 4800 static int nand_get_bits_per_cell(u8 cellinfo) 4801 { 4802 int bits; 4803 4804 bits = cellinfo & NAND_CI_CELLTYPE_MSK; 4805 bits >>= NAND_CI_CELLTYPE_SHIFT; 4806 return bits + 1; 4807 } 4808 4809 /* 4810 * Many new NAND share similar device ID codes, which represent the size of the 4811 * chip. The rest of the parameters must be decoded according to generic or 4812 * manufacturer-specific "extended ID" decoding patterns. 4813 */ 4814 void nand_decode_ext_id(struct nand_chip *chip) 4815 { 4816 struct nand_memory_organization *memorg; 4817 struct mtd_info *mtd = nand_to_mtd(chip); 4818 int extid; 4819 u8 *id_data = chip->id.data; 4820 4821 memorg = nanddev_get_memorg(&chip->base); 4822 4823 /* The 3rd id byte holds MLC / multichip data */ 4824 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]); 4825 /* The 4th id byte is the important one */ 4826 extid = id_data[3]; 4827 4828 /* Calc pagesize */ 4829 memorg->pagesize = 1024 << (extid & 0x03); 4830 mtd->writesize = memorg->pagesize; 4831 extid >>= 2; 4832 /* Calc oobsize */ 4833 memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); 4834 mtd->oobsize = memorg->oobsize; 4835 extid >>= 2; 4836 /* Calc blocksize. Blocksize is multiples of 64KiB */ 4837 memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) / 4838 memorg->pagesize; 4839 mtd->erasesize = (64 * 1024) << (extid & 0x03); 4840 extid >>= 2; 4841 /* Get buswidth information */ 4842 if (extid & 0x1) 4843 chip->options |= NAND_BUSWIDTH_16; 4844 } 4845 EXPORT_SYMBOL_GPL(nand_decode_ext_id); 4846 4847 /* 4848 * Old devices have chip data hardcoded in the device ID table. nand_decode_id 4849 * decodes a matching ID table entry and assigns the MTD size parameters for 4850 * the chip. 4851 */ 4852 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type) 4853 { 4854 struct mtd_info *mtd = nand_to_mtd(chip); 4855 struct nand_memory_organization *memorg; 4856 4857 memorg = nanddev_get_memorg(&chip->base); 4858 4859 memorg->pages_per_eraseblock = type->erasesize / type->pagesize; 4860 mtd->erasesize = type->erasesize; 4861 memorg->pagesize = type->pagesize; 4862 mtd->writesize = memorg->pagesize; 4863 memorg->oobsize = memorg->pagesize / 32; 4864 mtd->oobsize = memorg->oobsize; 4865 4866 /* All legacy ID NAND are small-page, SLC */ 4867 memorg->bits_per_cell = 1; 4868 } 4869 4870 /* 4871 * Set the bad block marker/indicator (BBM/BBI) patterns according to some 4872 * heuristic patterns using various detected parameters (e.g., manufacturer, 4873 * page size, cell-type information). 4874 */ 4875 static void nand_decode_bbm_options(struct nand_chip *chip) 4876 { 4877 struct mtd_info *mtd = nand_to_mtd(chip); 4878 4879 /* Set the bad block position */ 4880 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16)) 4881 chip->badblockpos = NAND_BBM_POS_LARGE; 4882 else 4883 chip->badblockpos = NAND_BBM_POS_SMALL; 4884 } 4885 4886 static inline bool is_full_id_nand(struct nand_flash_dev *type) 4887 { 4888 return type->id_len; 4889 } 4890 4891 static bool find_full_id_nand(struct nand_chip *chip, 4892 struct nand_flash_dev *type) 4893 { 4894 struct nand_device *base = &chip->base; 4895 struct nand_ecc_props requirements; 4896 struct mtd_info *mtd = nand_to_mtd(chip); 4897 struct nand_memory_organization *memorg; 4898 u8 *id_data = chip->id.data; 4899 4900 memorg = nanddev_get_memorg(&chip->base); 4901 4902 if (!strncmp(type->id, id_data, type->id_len)) { 4903 memorg->pagesize = type->pagesize; 4904 mtd->writesize = memorg->pagesize; 4905 memorg->pages_per_eraseblock = type->erasesize / 4906 type->pagesize; 4907 mtd->erasesize = type->erasesize; 4908 memorg->oobsize = type->oobsize; 4909 mtd->oobsize = memorg->oobsize; 4910 4911 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]); 4912 memorg->eraseblocks_per_lun = 4913 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20, 4914 memorg->pagesize * 4915 memorg->pages_per_eraseblock); 4916 chip->options |= type->options; 4917 requirements.strength = NAND_ECC_STRENGTH(type); 4918 requirements.step_size = NAND_ECC_STEP(type); 4919 nanddev_set_ecc_requirements(base, &requirements); 4920 4921 chip->parameters.model = kstrdup(type->name, GFP_KERNEL); 4922 if (!chip->parameters.model) 4923 return false; 4924 4925 return true; 4926 } 4927 return false; 4928 } 4929 4930 /* 4931 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC 4932 * compliant and does not have a full-id or legacy-id entry in the nand_ids 4933 * table. 4934 */ 4935 static void nand_manufacturer_detect(struct nand_chip *chip) 4936 { 4937 /* 4938 * Try manufacturer detection if available and use 4939 * nand_decode_ext_id() otherwise. 4940 */ 4941 if (chip->manufacturer.desc && chip->manufacturer.desc->ops && 4942 chip->manufacturer.desc->ops->detect) { 4943 struct nand_memory_organization *memorg; 4944 4945 memorg = nanddev_get_memorg(&chip->base); 4946 4947 /* The 3rd id byte holds MLC / multichip data */ 4948 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]); 4949 chip->manufacturer.desc->ops->detect(chip); 4950 } else { 4951 nand_decode_ext_id(chip); 4952 } 4953 } 4954 4955 /* 4956 * Manufacturer initialization. This function is called for all NANDs including 4957 * ONFI and JEDEC compliant ones. 4958 * Manufacturer drivers should put all their specific initialization code in 4959 * their ->init() hook. 4960 */ 4961 static int nand_manufacturer_init(struct nand_chip *chip) 4962 { 4963 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops || 4964 !chip->manufacturer.desc->ops->init) 4965 return 0; 4966 4967 return chip->manufacturer.desc->ops->init(chip); 4968 } 4969 4970 /* 4971 * Manufacturer cleanup. This function is called for all NANDs including 4972 * ONFI and JEDEC compliant ones. 4973 * Manufacturer drivers should put all their specific cleanup code in their 4974 * ->cleanup() hook. 4975 */ 4976 static void nand_manufacturer_cleanup(struct nand_chip *chip) 4977 { 4978 /* Release manufacturer private data */ 4979 if (chip->manufacturer.desc && chip->manufacturer.desc->ops && 4980 chip->manufacturer.desc->ops->cleanup) 4981 chip->manufacturer.desc->ops->cleanup(chip); 4982 } 4983 4984 static const char * 4985 nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc) 4986 { 4987 return manufacturer_desc ? manufacturer_desc->name : "Unknown"; 4988 } 4989 4990 /* 4991 * Get the flash and manufacturer id and lookup if the type is supported. 4992 */ 4993 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) 4994 { 4995 const struct nand_manufacturer_desc *manufacturer_desc; 4996 struct mtd_info *mtd = nand_to_mtd(chip); 4997 struct nand_memory_organization *memorg; 4998 int busw, ret; 4999 u8 *id_data = chip->id.data; 5000 u8 maf_id, dev_id; 5001 u64 targetsize; 5002 5003 /* 5004 * Let's start by initializing memorg fields that might be left 5005 * unassigned by the ID-based detection logic. 5006 */ 5007 memorg = nanddev_get_memorg(&chip->base); 5008 memorg->planes_per_lun = 1; 5009 memorg->luns_per_target = 1; 5010 5011 /* 5012 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 5013 * after power-up. 5014 */ 5015 ret = nand_reset(chip, 0); 5016 if (ret) 5017 return ret; 5018 5019 /* Select the device */ 5020 nand_select_target(chip, 0); 5021 5022 /* Send the command for reading device ID */ 5023 ret = nand_readid_op(chip, 0, id_data, 2); 5024 if (ret) 5025 return ret; 5026 5027 /* Read manufacturer and device IDs */ 5028 maf_id = id_data[0]; 5029 dev_id = id_data[1]; 5030 5031 /* 5032 * Try again to make sure, as some systems the bus-hold or other 5033 * interface concerns can cause random data which looks like a 5034 * possibly credible NAND flash to appear. If the two results do 5035 * not match, ignore the device completely. 5036 */ 5037 5038 /* Read entire ID string */ 5039 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data)); 5040 if (ret) 5041 return ret; 5042 5043 if (id_data[0] != maf_id || id_data[1] != dev_id) { 5044 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n", 5045 maf_id, dev_id, id_data[0], id_data[1]); 5046 return -ENODEV; 5047 } 5048 5049 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data)); 5050 5051 /* Try to identify manufacturer */ 5052 manufacturer_desc = nand_get_manufacturer_desc(maf_id); 5053 chip->manufacturer.desc = manufacturer_desc; 5054 5055 if (!type) 5056 type = nand_flash_ids; 5057 5058 /* 5059 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic 5060 * override it. 5061 * This is required to make sure initial NAND bus width set by the 5062 * NAND controller driver is coherent with the real NAND bus width 5063 * (extracted by auto-detection code). 5064 */ 5065 busw = chip->options & NAND_BUSWIDTH_16; 5066 5067 /* 5068 * The flag is only set (never cleared), reset it to its default value 5069 * before starting auto-detection. 5070 */ 5071 chip->options &= ~NAND_BUSWIDTH_16; 5072 5073 for (; type->name != NULL; type++) { 5074 if (is_full_id_nand(type)) { 5075 if (find_full_id_nand(chip, type)) 5076 goto ident_done; 5077 } else if (dev_id == type->dev_id) { 5078 break; 5079 } 5080 } 5081 5082 if (!type->name || !type->pagesize) { 5083 /* Check if the chip is ONFI compliant */ 5084 ret = nand_onfi_detect(chip); 5085 if (ret < 0) 5086 return ret; 5087 else if (ret) 5088 goto ident_done; 5089 5090 /* Check if the chip is JEDEC compliant */ 5091 ret = nand_jedec_detect(chip); 5092 if (ret < 0) 5093 return ret; 5094 else if (ret) 5095 goto ident_done; 5096 } 5097 5098 if (!type->name) 5099 return -ENODEV; 5100 5101 chip->parameters.model = kstrdup(type->name, GFP_KERNEL); 5102 if (!chip->parameters.model) 5103 return -ENOMEM; 5104 5105 if (!type->pagesize) 5106 nand_manufacturer_detect(chip); 5107 else 5108 nand_decode_id(chip, type); 5109 5110 /* Get chip options */ 5111 chip->options |= type->options; 5112 5113 memorg->eraseblocks_per_lun = 5114 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20, 5115 memorg->pagesize * 5116 memorg->pages_per_eraseblock); 5117 5118 ident_done: 5119 if (!mtd->name) 5120 mtd->name = chip->parameters.model; 5121 5122 if (chip->options & NAND_BUSWIDTH_AUTO) { 5123 WARN_ON(busw & NAND_BUSWIDTH_16); 5124 nand_set_defaults(chip); 5125 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) { 5126 /* 5127 * Check, if buswidth is correct. Hardware drivers should set 5128 * chip correct! 5129 */ 5130 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", 5131 maf_id, dev_id); 5132 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), 5133 mtd->name); 5134 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, 5135 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8); 5136 ret = -EINVAL; 5137 5138 goto free_detect_allocation; 5139 } 5140 5141 nand_decode_bbm_options(chip); 5142 5143 /* Calculate the address shift from the page size */ 5144 chip->page_shift = ffs(mtd->writesize) - 1; 5145 /* Convert chipsize to number of pages per chip -1 */ 5146 targetsize = nanddev_target_size(&chip->base); 5147 chip->pagemask = (targetsize >> chip->page_shift) - 1; 5148 5149 chip->bbt_erase_shift = chip->phys_erase_shift = 5150 ffs(mtd->erasesize) - 1; 5151 if (targetsize & 0xffffffff) 5152 chip->chip_shift = ffs((unsigned)targetsize) - 1; 5153 else { 5154 chip->chip_shift = ffs((unsigned)(targetsize >> 32)); 5155 chip->chip_shift += 32 - 1; 5156 } 5157 5158 if (chip->chip_shift - chip->page_shift > 16) 5159 chip->options |= NAND_ROW_ADDR_3; 5160 5161 chip->badblockbits = 8; 5162 5163 nand_legacy_adjust_cmdfunc(chip); 5164 5165 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", 5166 maf_id, dev_id); 5167 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), 5168 chip->parameters.model); 5169 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", 5170 (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC", 5171 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize); 5172 return 0; 5173 5174 free_detect_allocation: 5175 kfree(chip->parameters.model); 5176 5177 return ret; 5178 } 5179 5180 static enum nand_ecc_engine_type 5181 of_get_rawnand_ecc_engine_type_legacy(struct device_node *np) 5182 { 5183 enum nand_ecc_legacy_mode { 5184 NAND_ECC_INVALID, 5185 NAND_ECC_NONE, 5186 NAND_ECC_SOFT, 5187 NAND_ECC_SOFT_BCH, 5188 NAND_ECC_HW, 5189 NAND_ECC_HW_SYNDROME, 5190 NAND_ECC_ON_DIE, 5191 }; 5192 const char * const nand_ecc_legacy_modes[] = { 5193 [NAND_ECC_NONE] = "none", 5194 [NAND_ECC_SOFT] = "soft", 5195 [NAND_ECC_SOFT_BCH] = "soft_bch", 5196 [NAND_ECC_HW] = "hw", 5197 [NAND_ECC_HW_SYNDROME] = "hw_syndrome", 5198 [NAND_ECC_ON_DIE] = "on-die", 5199 }; 5200 enum nand_ecc_legacy_mode eng_type; 5201 const char *pm; 5202 int err; 5203 5204 err = of_property_read_string(np, "nand-ecc-mode", &pm); 5205 if (err) 5206 return NAND_ECC_ENGINE_TYPE_INVALID; 5207 5208 for (eng_type = NAND_ECC_NONE; 5209 eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) { 5210 if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) { 5211 switch (eng_type) { 5212 case NAND_ECC_NONE: 5213 return NAND_ECC_ENGINE_TYPE_NONE; 5214 case NAND_ECC_SOFT: 5215 case NAND_ECC_SOFT_BCH: 5216 return NAND_ECC_ENGINE_TYPE_SOFT; 5217 case NAND_ECC_HW: 5218 case NAND_ECC_HW_SYNDROME: 5219 return NAND_ECC_ENGINE_TYPE_ON_HOST; 5220 case NAND_ECC_ON_DIE: 5221 return NAND_ECC_ENGINE_TYPE_ON_DIE; 5222 default: 5223 break; 5224 } 5225 } 5226 } 5227 5228 return NAND_ECC_ENGINE_TYPE_INVALID; 5229 } 5230 5231 static enum nand_ecc_placement 5232 of_get_rawnand_ecc_placement_legacy(struct device_node *np) 5233 { 5234 const char *pm; 5235 int err; 5236 5237 err = of_property_read_string(np, "nand-ecc-mode", &pm); 5238 if (!err) { 5239 if (!strcasecmp(pm, "hw_syndrome")) 5240 return NAND_ECC_PLACEMENT_INTERLEAVED; 5241 } 5242 5243 return NAND_ECC_PLACEMENT_UNKNOWN; 5244 } 5245 5246 static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np) 5247 { 5248 const char *pm; 5249 int err; 5250 5251 err = of_property_read_string(np, "nand-ecc-mode", &pm); 5252 if (!err) { 5253 if (!strcasecmp(pm, "soft")) 5254 return NAND_ECC_ALGO_HAMMING; 5255 else if (!strcasecmp(pm, "soft_bch")) 5256 return NAND_ECC_ALGO_BCH; 5257 } 5258 5259 return NAND_ECC_ALGO_UNKNOWN; 5260 } 5261 5262 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip) 5263 { 5264 struct device_node *dn = nand_get_flash_node(chip); 5265 struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf; 5266 5267 if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID) 5268 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn); 5269 5270 if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN) 5271 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn); 5272 5273 if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN) 5274 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn); 5275 } 5276 5277 static int of_get_nand_bus_width(struct device_node *np) 5278 { 5279 u32 val; 5280 5281 if (of_property_read_u32(np, "nand-bus-width", &val)) 5282 return 8; 5283 5284 switch (val) { 5285 case 8: 5286 case 16: 5287 return val; 5288 default: 5289 return -EIO; 5290 } 5291 } 5292 5293 static bool of_get_nand_on_flash_bbt(struct device_node *np) 5294 { 5295 return of_property_read_bool(np, "nand-on-flash-bbt"); 5296 } 5297 5298 static int of_get_nand_secure_regions(struct nand_chip *chip) 5299 { 5300 struct device_node *dn = nand_get_flash_node(chip); 5301 struct property *prop; 5302 int nr_elem, i, j; 5303 5304 /* Only proceed if the "secure-regions" property is present in DT */ 5305 prop = of_find_property(dn, "secure-regions", NULL); 5306 if (!prop) 5307 return 0; 5308 5309 nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64)); 5310 if (nr_elem <= 0) 5311 return nr_elem; 5312 5313 chip->nr_secure_regions = nr_elem / 2; 5314 chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions), 5315 GFP_KERNEL); 5316 if (!chip->secure_regions) 5317 return -ENOMEM; 5318 5319 for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) { 5320 of_property_read_u64_index(dn, "secure-regions", j, 5321 &chip->secure_regions[i].offset); 5322 of_property_read_u64_index(dn, "secure-regions", j + 1, 5323 &chip->secure_regions[i].size); 5324 } 5325 5326 return 0; 5327 } 5328 5329 /** 5330 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller 5331 * @dev: Device that will be parsed. Also used for managed allocations. 5332 * @cs_array: Array of GPIO desc pointers allocated on success 5333 * @ncs_array: Number of entries in @cs_array updated on success. 5334 * @return 0 on success, an error otherwise. 5335 */ 5336 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array, 5337 unsigned int *ncs_array) 5338 { 5339 struct device_node *np = dev->of_node; 5340 struct gpio_desc **descs; 5341 int ndescs, i; 5342 5343 ndescs = of_gpio_named_count(np, "cs-gpios"); 5344 if (ndescs < 0) { 5345 dev_dbg(dev, "No valid cs-gpios property\n"); 5346 return 0; 5347 } 5348 5349 descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL); 5350 if (!descs) 5351 return -ENOMEM; 5352 5353 for (i = 0; i < ndescs; i++) { 5354 descs[i] = gpiod_get_index_optional(dev, "cs", i, 5355 GPIOD_OUT_HIGH); 5356 if (IS_ERR(descs[i])) 5357 return PTR_ERR(descs[i]); 5358 } 5359 5360 *ncs_array = ndescs; 5361 *cs_array = descs; 5362 5363 return 0; 5364 } 5365 EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs); 5366 5367 static int rawnand_dt_init(struct nand_chip *chip) 5368 { 5369 struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip)); 5370 struct device_node *dn = nand_get_flash_node(chip); 5371 5372 if (!dn) 5373 return 0; 5374 5375 if (of_get_nand_bus_width(dn) == 16) 5376 chip->options |= NAND_BUSWIDTH_16; 5377 5378 if (of_property_read_bool(dn, "nand-is-boot-medium")) 5379 chip->options |= NAND_IS_BOOT_MEDIUM; 5380 5381 if (of_get_nand_on_flash_bbt(dn)) 5382 chip->bbt_options |= NAND_BBT_USE_FLASH; 5383 5384 of_get_nand_ecc_user_config(nand); 5385 of_get_nand_ecc_legacy_user_config(chip); 5386 5387 /* 5388 * If neither the user nor the NAND controller have requested a specific 5389 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST. 5390 */ 5391 nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; 5392 5393 /* 5394 * Use the user requested engine type, unless there is none, in this 5395 * case default to the NAND controller choice, otherwise fallback to 5396 * the raw NAND default one. 5397 */ 5398 if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID) 5399 chip->ecc.engine_type = nand->ecc.user_conf.engine_type; 5400 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) 5401 chip->ecc.engine_type = nand->ecc.defaults.engine_type; 5402 5403 chip->ecc.placement = nand->ecc.user_conf.placement; 5404 chip->ecc.algo = nand->ecc.user_conf.algo; 5405 chip->ecc.strength = nand->ecc.user_conf.strength; 5406 chip->ecc.size = nand->ecc.user_conf.step_size; 5407 5408 return 0; 5409 } 5410 5411 /** 5412 * nand_scan_ident - Scan for the NAND device 5413 * @chip: NAND chip object 5414 * @maxchips: number of chips to scan for 5415 * @table: alternative NAND ID table 5416 * 5417 * This is the first phase of the normal nand_scan() function. It reads the 5418 * flash ID and sets up MTD fields accordingly. 5419 * 5420 * This helper used to be called directly from controller drivers that needed 5421 * to tweak some ECC-related parameters before nand_scan_tail(). This separation 5422 * prevented dynamic allocations during this phase which was unconvenient and 5423 * as been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks. 5424 */ 5425 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, 5426 struct nand_flash_dev *table) 5427 { 5428 struct mtd_info *mtd = nand_to_mtd(chip); 5429 struct nand_memory_organization *memorg; 5430 int nand_maf_id, nand_dev_id; 5431 unsigned int i; 5432 int ret; 5433 5434 memorg = nanddev_get_memorg(&chip->base); 5435 5436 /* Assume all dies are deselected when we enter nand_scan_ident(). */ 5437 chip->cur_cs = -1; 5438 5439 mutex_init(&chip->lock); 5440 5441 /* Enforce the right timings for reset/detection */ 5442 chip->current_interface_config = nand_get_reset_interface_config(); 5443 5444 ret = rawnand_dt_init(chip); 5445 if (ret) 5446 return ret; 5447 5448 if (!mtd->name && mtd->dev.parent) 5449 mtd->name = dev_name(mtd->dev.parent); 5450 5451 /* Set the default functions */ 5452 nand_set_defaults(chip); 5453 5454 ret = nand_legacy_check_hooks(chip); 5455 if (ret) 5456 return ret; 5457 5458 memorg->ntargets = maxchips; 5459 5460 /* Read the flash type */ 5461 ret = nand_detect(chip, table); 5462 if (ret) { 5463 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 5464 pr_warn("No NAND device found\n"); 5465 nand_deselect_target(chip); 5466 return ret; 5467 } 5468 5469 nand_maf_id = chip->id.data[0]; 5470 nand_dev_id = chip->id.data[1]; 5471 5472 nand_deselect_target(chip); 5473 5474 /* Check for a chip array */ 5475 for (i = 1; i < maxchips; i++) { 5476 u8 id[2]; 5477 5478 /* See comment in nand_get_flash_type for reset */ 5479 ret = nand_reset(chip, i); 5480 if (ret) 5481 break; 5482 5483 nand_select_target(chip, i); 5484 /* Send the command for reading device ID */ 5485 ret = nand_readid_op(chip, 0, id, sizeof(id)); 5486 if (ret) 5487 break; 5488 /* Read manufacturer and device IDs */ 5489 if (nand_maf_id != id[0] || nand_dev_id != id[1]) { 5490 nand_deselect_target(chip); 5491 break; 5492 } 5493 nand_deselect_target(chip); 5494 } 5495 if (i > 1) 5496 pr_info("%d chips detected\n", i); 5497 5498 /* Store the number of chips and calc total size for mtd */ 5499 memorg->ntargets = i; 5500 mtd->size = i * nanddev_target_size(&chip->base); 5501 5502 return 0; 5503 } 5504 5505 static void nand_scan_ident_cleanup(struct nand_chip *chip) 5506 { 5507 kfree(chip->parameters.model); 5508 kfree(chip->parameters.onfi); 5509 } 5510 5511 int rawnand_sw_hamming_init(struct nand_chip *chip) 5512 { 5513 struct nand_ecc_sw_hamming_conf *engine_conf; 5514 struct nand_device *base = &chip->base; 5515 int ret; 5516 5517 base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 5518 base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING; 5519 base->ecc.user_conf.strength = chip->ecc.strength; 5520 base->ecc.user_conf.step_size = chip->ecc.size; 5521 5522 ret = nand_ecc_sw_hamming_init_ctx(base); 5523 if (ret) 5524 return ret; 5525 5526 engine_conf = base->ecc.ctx.priv; 5527 5528 if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER) 5529 engine_conf->sm_order = true; 5530 5531 chip->ecc.size = base->ecc.ctx.conf.step_size; 5532 chip->ecc.strength = base->ecc.ctx.conf.strength; 5533 chip->ecc.total = base->ecc.ctx.total; 5534 chip->ecc.steps = nanddev_get_ecc_nsteps(base); 5535 chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base); 5536 5537 return 0; 5538 } 5539 EXPORT_SYMBOL(rawnand_sw_hamming_init); 5540 5541 int rawnand_sw_hamming_calculate(struct nand_chip *chip, 5542 const unsigned char *buf, 5543 unsigned char *code) 5544 { 5545 struct nand_device *base = &chip->base; 5546 5547 return nand_ecc_sw_hamming_calculate(base, buf, code); 5548 } 5549 EXPORT_SYMBOL(rawnand_sw_hamming_calculate); 5550 5551 int rawnand_sw_hamming_correct(struct nand_chip *chip, 5552 unsigned char *buf, 5553 unsigned char *read_ecc, 5554 unsigned char *calc_ecc) 5555 { 5556 struct nand_device *base = &chip->base; 5557 5558 return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc); 5559 } 5560 EXPORT_SYMBOL(rawnand_sw_hamming_correct); 5561 5562 void rawnand_sw_hamming_cleanup(struct nand_chip *chip) 5563 { 5564 struct nand_device *base = &chip->base; 5565 5566 nand_ecc_sw_hamming_cleanup_ctx(base); 5567 } 5568 EXPORT_SYMBOL(rawnand_sw_hamming_cleanup); 5569 5570 int rawnand_sw_bch_init(struct nand_chip *chip) 5571 { 5572 struct nand_device *base = &chip->base; 5573 const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base); 5574 int ret; 5575 5576 base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 5577 base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH; 5578 base->ecc.user_conf.step_size = chip->ecc.size; 5579 base->ecc.user_conf.strength = chip->ecc.strength; 5580 5581 ret = nand_ecc_sw_bch_init_ctx(base); 5582 if (ret) 5583 return ret; 5584 5585 chip->ecc.size = ecc_conf->step_size; 5586 chip->ecc.strength = ecc_conf->strength; 5587 chip->ecc.total = base->ecc.ctx.total; 5588 chip->ecc.steps = nanddev_get_ecc_nsteps(base); 5589 chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base); 5590 5591 return 0; 5592 } 5593 EXPORT_SYMBOL(rawnand_sw_bch_init); 5594 5595 static int rawnand_sw_bch_calculate(struct nand_chip *chip, 5596 const unsigned char *buf, 5597 unsigned char *code) 5598 { 5599 struct nand_device *base = &chip->base; 5600 5601 return nand_ecc_sw_bch_calculate(base, buf, code); 5602 } 5603 5604 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf, 5605 unsigned char *read_ecc, unsigned char *calc_ecc) 5606 { 5607 struct nand_device *base = &chip->base; 5608 5609 return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc); 5610 } 5611 EXPORT_SYMBOL(rawnand_sw_bch_correct); 5612 5613 void rawnand_sw_bch_cleanup(struct nand_chip *chip) 5614 { 5615 struct nand_device *base = &chip->base; 5616 5617 nand_ecc_sw_bch_cleanup_ctx(base); 5618 } 5619 EXPORT_SYMBOL(rawnand_sw_bch_cleanup); 5620 5621 static int nand_set_ecc_on_host_ops(struct nand_chip *chip) 5622 { 5623 struct nand_ecc_ctrl *ecc = &chip->ecc; 5624 5625 switch (ecc->placement) { 5626 case NAND_ECC_PLACEMENT_UNKNOWN: 5627 case NAND_ECC_PLACEMENT_OOB: 5628 /* Use standard hwecc read page function? */ 5629 if (!ecc->read_page) 5630 ecc->read_page = nand_read_page_hwecc; 5631 if (!ecc->write_page) 5632 ecc->write_page = nand_write_page_hwecc; 5633 if (!ecc->read_page_raw) 5634 ecc->read_page_raw = nand_read_page_raw; 5635 if (!ecc->write_page_raw) 5636 ecc->write_page_raw = nand_write_page_raw; 5637 if (!ecc->read_oob) 5638 ecc->read_oob = nand_read_oob_std; 5639 if (!ecc->write_oob) 5640 ecc->write_oob = nand_write_oob_std; 5641 if (!ecc->read_subpage) 5642 ecc->read_subpage = nand_read_subpage; 5643 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate) 5644 ecc->write_subpage = nand_write_subpage_hwecc; 5645 fallthrough; 5646 5647 case NAND_ECC_PLACEMENT_INTERLEAVED: 5648 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) && 5649 (!ecc->read_page || 5650 ecc->read_page == nand_read_page_hwecc || 5651 !ecc->write_page || 5652 ecc->write_page == nand_write_page_hwecc)) { 5653 WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); 5654 return -EINVAL; 5655 } 5656 /* Use standard syndrome read/write page function? */ 5657 if (!ecc->read_page) 5658 ecc->read_page = nand_read_page_syndrome; 5659 if (!ecc->write_page) 5660 ecc->write_page = nand_write_page_syndrome; 5661 if (!ecc->read_page_raw) 5662 ecc->read_page_raw = nand_read_page_raw_syndrome; 5663 if (!ecc->write_page_raw) 5664 ecc->write_page_raw = nand_write_page_raw_syndrome; 5665 if (!ecc->read_oob) 5666 ecc->read_oob = nand_read_oob_syndrome; 5667 if (!ecc->write_oob) 5668 ecc->write_oob = nand_write_oob_syndrome; 5669 break; 5670 5671 default: 5672 pr_warn("Invalid NAND_ECC_PLACEMENT %d\n", 5673 ecc->placement); 5674 return -EINVAL; 5675 } 5676 5677 return 0; 5678 } 5679 5680 static int nand_set_ecc_soft_ops(struct nand_chip *chip) 5681 { 5682 struct mtd_info *mtd = nand_to_mtd(chip); 5683 struct nand_device *nanddev = mtd_to_nanddev(mtd); 5684 struct nand_ecc_ctrl *ecc = &chip->ecc; 5685 int ret; 5686 5687 if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT)) 5688 return -EINVAL; 5689 5690 switch (ecc->algo) { 5691 case NAND_ECC_ALGO_HAMMING: 5692 ecc->calculate = rawnand_sw_hamming_calculate; 5693 ecc->correct = rawnand_sw_hamming_correct; 5694 ecc->read_page = nand_read_page_swecc; 5695 ecc->read_subpage = nand_read_subpage; 5696 ecc->write_page = nand_write_page_swecc; 5697 if (!ecc->read_page_raw) 5698 ecc->read_page_raw = nand_read_page_raw; 5699 if (!ecc->write_page_raw) 5700 ecc->write_page_raw = nand_write_page_raw; 5701 ecc->read_oob = nand_read_oob_std; 5702 ecc->write_oob = nand_write_oob_std; 5703 if (!ecc->size) 5704 ecc->size = 256; 5705 ecc->bytes = 3; 5706 ecc->strength = 1; 5707 5708 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) 5709 ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER; 5710 5711 ret = rawnand_sw_hamming_init(chip); 5712 if (ret) { 5713 WARN(1, "Hamming ECC initialization failed!\n"); 5714 return ret; 5715 } 5716 5717 return 0; 5718 case NAND_ECC_ALGO_BCH: 5719 if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) { 5720 WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n"); 5721 return -EINVAL; 5722 } 5723 ecc->calculate = rawnand_sw_bch_calculate; 5724 ecc->correct = rawnand_sw_bch_correct; 5725 ecc->read_page = nand_read_page_swecc; 5726 ecc->read_subpage = nand_read_subpage; 5727 ecc->write_page = nand_write_page_swecc; 5728 if (!ecc->read_page_raw) 5729 ecc->read_page_raw = nand_read_page_raw; 5730 if (!ecc->write_page_raw) 5731 ecc->write_page_raw = nand_write_page_raw; 5732 ecc->read_oob = nand_read_oob_std; 5733 ecc->write_oob = nand_write_oob_std; 5734 5735 /* 5736 * We can only maximize ECC config when the default layout is 5737 * used, otherwise we don't know how many bytes can really be 5738 * used. 5739 */ 5740 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH && 5741 mtd->ooblayout != nand_get_large_page_ooblayout()) 5742 nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH; 5743 5744 ret = rawnand_sw_bch_init(chip); 5745 if (ret) { 5746 WARN(1, "BCH ECC initialization failed!\n"); 5747 return ret; 5748 } 5749 5750 return 0; 5751 default: 5752 WARN(1, "Unsupported ECC algorithm!\n"); 5753 return -EINVAL; 5754 } 5755 } 5756 5757 /** 5758 * nand_check_ecc_caps - check the sanity of preset ECC settings 5759 * @chip: nand chip info structure 5760 * @caps: ECC caps info structure 5761 * @oobavail: OOB size that the ECC engine can use 5762 * 5763 * When ECC step size and strength are already set, check if they are supported 5764 * by the controller and the calculated ECC bytes fit within the chip's OOB. 5765 * On success, the calculated ECC bytes is set. 5766 */ 5767 static int 5768 nand_check_ecc_caps(struct nand_chip *chip, 5769 const struct nand_ecc_caps *caps, int oobavail) 5770 { 5771 struct mtd_info *mtd = nand_to_mtd(chip); 5772 const struct nand_ecc_step_info *stepinfo; 5773 int preset_step = chip->ecc.size; 5774 int preset_strength = chip->ecc.strength; 5775 int ecc_bytes, nsteps = mtd->writesize / preset_step; 5776 int i, j; 5777 5778 for (i = 0; i < caps->nstepinfos; i++) { 5779 stepinfo = &caps->stepinfos[i]; 5780 5781 if (stepinfo->stepsize != preset_step) 5782 continue; 5783 5784 for (j = 0; j < stepinfo->nstrengths; j++) { 5785 if (stepinfo->strengths[j] != preset_strength) 5786 continue; 5787 5788 ecc_bytes = caps->calc_ecc_bytes(preset_step, 5789 preset_strength); 5790 if (WARN_ON_ONCE(ecc_bytes < 0)) 5791 return ecc_bytes; 5792 5793 if (ecc_bytes * nsteps > oobavail) { 5794 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB", 5795 preset_step, preset_strength); 5796 return -ENOSPC; 5797 } 5798 5799 chip->ecc.bytes = ecc_bytes; 5800 5801 return 0; 5802 } 5803 } 5804 5805 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller", 5806 preset_step, preset_strength); 5807 5808 return -ENOTSUPP; 5809 } 5810 5811 /** 5812 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes 5813 * @chip: nand chip info structure 5814 * @caps: ECC engine caps info structure 5815 * @oobavail: OOB size that the ECC engine can use 5816 * 5817 * If a chip's ECC requirement is provided, try to meet it with the least 5818 * number of ECC bytes (i.e. with the largest number of OOB-free bytes). 5819 * On success, the chosen ECC settings are set. 5820 */ 5821 static int 5822 nand_match_ecc_req(struct nand_chip *chip, 5823 const struct nand_ecc_caps *caps, int oobavail) 5824 { 5825 const struct nand_ecc_props *requirements = 5826 nanddev_get_ecc_requirements(&chip->base); 5827 struct mtd_info *mtd = nand_to_mtd(chip); 5828 const struct nand_ecc_step_info *stepinfo; 5829 int req_step = requirements->step_size; 5830 int req_strength = requirements->strength; 5831 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total; 5832 int best_step, best_strength, best_ecc_bytes; 5833 int best_ecc_bytes_total = INT_MAX; 5834 int i, j; 5835 5836 /* No information provided by the NAND chip */ 5837 if (!req_step || !req_strength) 5838 return -ENOTSUPP; 5839 5840 /* number of correctable bits the chip requires in a page */ 5841 req_corr = mtd->writesize / req_step * req_strength; 5842 5843 for (i = 0; i < caps->nstepinfos; i++) { 5844 stepinfo = &caps->stepinfos[i]; 5845 step_size = stepinfo->stepsize; 5846 5847 for (j = 0; j < stepinfo->nstrengths; j++) { 5848 strength = stepinfo->strengths[j]; 5849 5850 /* 5851 * If both step size and strength are smaller than the 5852 * chip's requirement, it is not easy to compare the 5853 * resulted reliability. 5854 */ 5855 if (step_size < req_step && strength < req_strength) 5856 continue; 5857 5858 if (mtd->writesize % step_size) 5859 continue; 5860 5861 nsteps = mtd->writesize / step_size; 5862 5863 ecc_bytes = caps->calc_ecc_bytes(step_size, strength); 5864 if (WARN_ON_ONCE(ecc_bytes < 0)) 5865 continue; 5866 ecc_bytes_total = ecc_bytes * nsteps; 5867 5868 if (ecc_bytes_total > oobavail || 5869 strength * nsteps < req_corr) 5870 continue; 5871 5872 /* 5873 * We assume the best is to meet the chip's requrement 5874 * with the least number of ECC bytes. 5875 */ 5876 if (ecc_bytes_total < best_ecc_bytes_total) { 5877 best_ecc_bytes_total = ecc_bytes_total; 5878 best_step = step_size; 5879 best_strength = strength; 5880 best_ecc_bytes = ecc_bytes; 5881 } 5882 } 5883 } 5884 5885 if (best_ecc_bytes_total == INT_MAX) 5886 return -ENOTSUPP; 5887 5888 chip->ecc.size = best_step; 5889 chip->ecc.strength = best_strength; 5890 chip->ecc.bytes = best_ecc_bytes; 5891 5892 return 0; 5893 } 5894 5895 /** 5896 * nand_maximize_ecc - choose the max ECC strength available 5897 * @chip: nand chip info structure 5898 * @caps: ECC engine caps info structure 5899 * @oobavail: OOB size that the ECC engine can use 5900 * 5901 * Choose the max ECC strength that is supported on the controller, and can fit 5902 * within the chip's OOB. On success, the chosen ECC settings are set. 5903 */ 5904 static int 5905 nand_maximize_ecc(struct nand_chip *chip, 5906 const struct nand_ecc_caps *caps, int oobavail) 5907 { 5908 struct mtd_info *mtd = nand_to_mtd(chip); 5909 const struct nand_ecc_step_info *stepinfo; 5910 int step_size, strength, nsteps, ecc_bytes, corr; 5911 int best_corr = 0; 5912 int best_step = 0; 5913 int best_strength, best_ecc_bytes; 5914 int i, j; 5915 5916 for (i = 0; i < caps->nstepinfos; i++) { 5917 stepinfo = &caps->stepinfos[i]; 5918 step_size = stepinfo->stepsize; 5919 5920 /* If chip->ecc.size is already set, respect it */ 5921 if (chip->ecc.size && step_size != chip->ecc.size) 5922 continue; 5923 5924 for (j = 0; j < stepinfo->nstrengths; j++) { 5925 strength = stepinfo->strengths[j]; 5926 5927 if (mtd->writesize % step_size) 5928 continue; 5929 5930 nsteps = mtd->writesize / step_size; 5931 5932 ecc_bytes = caps->calc_ecc_bytes(step_size, strength); 5933 if (WARN_ON_ONCE(ecc_bytes < 0)) 5934 continue; 5935 5936 if (ecc_bytes * nsteps > oobavail) 5937 continue; 5938 5939 corr = strength * nsteps; 5940 5941 /* 5942 * If the number of correctable bits is the same, 5943 * bigger step_size has more reliability. 5944 */ 5945 if (corr > best_corr || 5946 (corr == best_corr && step_size > best_step)) { 5947 best_corr = corr; 5948 best_step = step_size; 5949 best_strength = strength; 5950 best_ecc_bytes = ecc_bytes; 5951 } 5952 } 5953 } 5954 5955 if (!best_corr) 5956 return -ENOTSUPP; 5957 5958 chip->ecc.size = best_step; 5959 chip->ecc.strength = best_strength; 5960 chip->ecc.bytes = best_ecc_bytes; 5961 5962 return 0; 5963 } 5964 5965 /** 5966 * nand_ecc_choose_conf - Set the ECC strength and ECC step size 5967 * @chip: nand chip info structure 5968 * @caps: ECC engine caps info structure 5969 * @oobavail: OOB size that the ECC engine can use 5970 * 5971 * Choose the ECC configuration according to following logic. 5972 * 5973 * 1. If both ECC step size and ECC strength are already set (usually by DT) 5974 * then check if it is supported by this controller. 5975 * 2. If the user provided the nand-ecc-maximize property, then select maximum 5976 * ECC strength. 5977 * 3. Otherwise, try to match the ECC step size and ECC strength closest 5978 * to the chip's requirement. If available OOB size can't fit the chip 5979 * requirement then fallback to the maximum ECC step size and ECC strength. 5980 * 5981 * On success, the chosen ECC settings are set. 5982 */ 5983 int nand_ecc_choose_conf(struct nand_chip *chip, 5984 const struct nand_ecc_caps *caps, int oobavail) 5985 { 5986 struct mtd_info *mtd = nand_to_mtd(chip); 5987 struct nand_device *nanddev = mtd_to_nanddev(mtd); 5988 5989 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize)) 5990 return -EINVAL; 5991 5992 if (chip->ecc.size && chip->ecc.strength) 5993 return nand_check_ecc_caps(chip, caps, oobavail); 5994 5995 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) 5996 return nand_maximize_ecc(chip, caps, oobavail); 5997 5998 if (!nand_match_ecc_req(chip, caps, oobavail)) 5999 return 0; 6000 6001 return nand_maximize_ecc(chip, caps, oobavail); 6002 } 6003 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf); 6004 6005 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos) 6006 { 6007 struct nand_chip *chip = container_of(nand, struct nand_chip, 6008 base); 6009 unsigned int eb = nanddev_pos_to_row(nand, pos); 6010 int ret; 6011 6012 eb >>= nand->rowconv.eraseblock_addr_shift; 6013 6014 nand_select_target(chip, pos->target); 6015 ret = nand_erase_op(chip, eb); 6016 nand_deselect_target(chip); 6017 6018 return ret; 6019 } 6020 6021 static int rawnand_markbad(struct nand_device *nand, 6022 const struct nand_pos *pos) 6023 { 6024 struct nand_chip *chip = container_of(nand, struct nand_chip, 6025 base); 6026 6027 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos)); 6028 } 6029 6030 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos) 6031 { 6032 struct nand_chip *chip = container_of(nand, struct nand_chip, 6033 base); 6034 int ret; 6035 6036 nand_select_target(chip, pos->target); 6037 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos)); 6038 nand_deselect_target(chip); 6039 6040 return ret; 6041 } 6042 6043 static const struct nand_ops rawnand_ops = { 6044 .erase = rawnand_erase, 6045 .markbad = rawnand_markbad, 6046 .isbad = rawnand_isbad, 6047 }; 6048 6049 /** 6050 * nand_scan_tail - Scan for the NAND device 6051 * @chip: NAND chip object 6052 * 6053 * This is the second phase of the normal nand_scan() function. It fills out 6054 * all the uninitialized function pointers with the defaults and scans for a 6055 * bad block table if appropriate. 6056 */ 6057 static int nand_scan_tail(struct nand_chip *chip) 6058 { 6059 struct mtd_info *mtd = nand_to_mtd(chip); 6060 struct nand_ecc_ctrl *ecc = &chip->ecc; 6061 int ret, i; 6062 6063 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 6064 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && 6065 !(chip->bbt_options & NAND_BBT_USE_FLASH))) { 6066 return -EINVAL; 6067 } 6068 6069 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); 6070 if (!chip->data_buf) 6071 return -ENOMEM; 6072 6073 /* 6074 * FIXME: some NAND manufacturer drivers expect the first die to be 6075 * selected when manufacturer->init() is called. They should be fixed 6076 * to explictly select the relevant die when interacting with the NAND 6077 * chip. 6078 */ 6079 nand_select_target(chip, 0); 6080 ret = nand_manufacturer_init(chip); 6081 nand_deselect_target(chip); 6082 if (ret) 6083 goto err_free_buf; 6084 6085 /* Set the internal oob buffer location, just after the page data */ 6086 chip->oob_poi = chip->data_buf + mtd->writesize; 6087 6088 /* 6089 * If no default placement scheme is given, select an appropriate one. 6090 */ 6091 if (!mtd->ooblayout && 6092 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && 6093 ecc->algo == NAND_ECC_ALGO_BCH) && 6094 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && 6095 ecc->algo == NAND_ECC_ALGO_HAMMING)) { 6096 switch (mtd->oobsize) { 6097 case 8: 6098 case 16: 6099 mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout()); 6100 break; 6101 case 64: 6102 case 128: 6103 mtd_set_ooblayout(mtd, 6104 nand_get_large_page_hamming_ooblayout()); 6105 break; 6106 default: 6107 /* 6108 * Expose the whole OOB area to users if ECC_NONE 6109 * is passed. We could do that for all kind of 6110 * ->oobsize, but we must keep the old large/small 6111 * page with ECC layout when ->oobsize <= 128 for 6112 * compatibility reasons. 6113 */ 6114 if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) { 6115 mtd_set_ooblayout(mtd, 6116 nand_get_large_page_ooblayout()); 6117 break; 6118 } 6119 6120 WARN(1, "No oob scheme defined for oobsize %d\n", 6121 mtd->oobsize); 6122 ret = -EINVAL; 6123 goto err_nand_manuf_cleanup; 6124 } 6125 } 6126 6127 /* 6128 * Check ECC mode, default to software if 3byte/512byte hardware ECC is 6129 * selected and we have 256 byte pagesize fallback to software ECC 6130 */ 6131 6132 switch (ecc->engine_type) { 6133 case NAND_ECC_ENGINE_TYPE_ON_HOST: 6134 ret = nand_set_ecc_on_host_ops(chip); 6135 if (ret) 6136 goto err_nand_manuf_cleanup; 6137 6138 if (mtd->writesize >= ecc->size) { 6139 if (!ecc->strength) { 6140 WARN(1, "Driver must set ecc.strength when using hardware ECC\n"); 6141 ret = -EINVAL; 6142 goto err_nand_manuf_cleanup; 6143 } 6144 break; 6145 } 6146 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n", 6147 ecc->size, mtd->writesize); 6148 ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 6149 ecc->algo = NAND_ECC_ALGO_HAMMING; 6150 fallthrough; 6151 6152 case NAND_ECC_ENGINE_TYPE_SOFT: 6153 ret = nand_set_ecc_soft_ops(chip); 6154 if (ret) 6155 goto err_nand_manuf_cleanup; 6156 break; 6157 6158 case NAND_ECC_ENGINE_TYPE_ON_DIE: 6159 if (!ecc->read_page || !ecc->write_page) { 6160 WARN(1, "No ECC functions supplied; on-die ECC not possible\n"); 6161 ret = -EINVAL; 6162 goto err_nand_manuf_cleanup; 6163 } 6164 if (!ecc->read_oob) 6165 ecc->read_oob = nand_read_oob_std; 6166 if (!ecc->write_oob) 6167 ecc->write_oob = nand_write_oob_std; 6168 break; 6169 6170 case NAND_ECC_ENGINE_TYPE_NONE: 6171 pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n"); 6172 ecc->read_page = nand_read_page_raw; 6173 ecc->write_page = nand_write_page_raw; 6174 ecc->read_oob = nand_read_oob_std; 6175 ecc->read_page_raw = nand_read_page_raw; 6176 ecc->write_page_raw = nand_write_page_raw; 6177 ecc->write_oob = nand_write_oob_std; 6178 ecc->size = mtd->writesize; 6179 ecc->bytes = 0; 6180 ecc->strength = 0; 6181 break; 6182 6183 default: 6184 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type); 6185 ret = -EINVAL; 6186 goto err_nand_manuf_cleanup; 6187 } 6188 6189 if (ecc->correct || ecc->calculate) { 6190 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL); 6191 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL); 6192 if (!ecc->calc_buf || !ecc->code_buf) { 6193 ret = -ENOMEM; 6194 goto err_nand_manuf_cleanup; 6195 } 6196 } 6197 6198 /* For many systems, the standard OOB write also works for raw */ 6199 if (!ecc->read_oob_raw) 6200 ecc->read_oob_raw = ecc->read_oob; 6201 if (!ecc->write_oob_raw) 6202 ecc->write_oob_raw = ecc->write_oob; 6203 6204 /* propagate ecc info to mtd_info */ 6205 mtd->ecc_strength = ecc->strength; 6206 mtd->ecc_step_size = ecc->size; 6207 6208 /* 6209 * Set the number of read / write steps for one page depending on ECC 6210 * mode. 6211 */ 6212 if (!ecc->steps) 6213 ecc->steps = mtd->writesize / ecc->size; 6214 if (ecc->steps * ecc->size != mtd->writesize) { 6215 WARN(1, "Invalid ECC parameters\n"); 6216 ret = -EINVAL; 6217 goto err_nand_manuf_cleanup; 6218 } 6219 6220 if (!ecc->total) { 6221 ecc->total = ecc->steps * ecc->bytes; 6222 chip->base.ecc.ctx.total = ecc->total; 6223 } 6224 6225 if (ecc->total > mtd->oobsize) { 6226 WARN(1, "Total number of ECC bytes exceeded oobsize\n"); 6227 ret = -EINVAL; 6228 goto err_nand_manuf_cleanup; 6229 } 6230 6231 /* 6232 * The number of bytes available for a client to place data into 6233 * the out of band area. 6234 */ 6235 ret = mtd_ooblayout_count_freebytes(mtd); 6236 if (ret < 0) 6237 ret = 0; 6238 6239 mtd->oobavail = ret; 6240 6241 /* ECC sanity check: warn if it's too weak */ 6242 if (!nand_ecc_is_strong_enough(&chip->base)) 6243 pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n", 6244 mtd->name, chip->ecc.strength, chip->ecc.size, 6245 nanddev_get_ecc_requirements(&chip->base)->strength, 6246 nanddev_get_ecc_requirements(&chip->base)->step_size); 6247 6248 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ 6249 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) { 6250 switch (ecc->steps) { 6251 case 2: 6252 mtd->subpage_sft = 1; 6253 break; 6254 case 4: 6255 case 8: 6256 case 16: 6257 mtd->subpage_sft = 2; 6258 break; 6259 } 6260 } 6261 chip->subpagesize = mtd->writesize >> mtd->subpage_sft; 6262 6263 /* Invalidate the pagebuffer reference */ 6264 chip->pagecache.page = -1; 6265 6266 /* Large page NAND with SOFT_ECC should support subpage reads */ 6267 switch (ecc->engine_type) { 6268 case NAND_ECC_ENGINE_TYPE_SOFT: 6269 if (chip->page_shift > 9) 6270 chip->options |= NAND_SUBPAGE_READ; 6271 break; 6272 6273 default: 6274 break; 6275 } 6276 6277 ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner); 6278 if (ret) 6279 goto err_nand_manuf_cleanup; 6280 6281 /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */ 6282 if (chip->options & NAND_ROM) 6283 mtd->flags = MTD_CAP_ROM; 6284 6285 /* Fill in remaining MTD driver data */ 6286 mtd->_erase = nand_erase; 6287 mtd->_point = NULL; 6288 mtd->_unpoint = NULL; 6289 mtd->_panic_write = panic_nand_write; 6290 mtd->_read_oob = nand_read_oob; 6291 mtd->_write_oob = nand_write_oob; 6292 mtd->_sync = nand_sync; 6293 mtd->_lock = nand_lock; 6294 mtd->_unlock = nand_unlock; 6295 mtd->_suspend = nand_suspend; 6296 mtd->_resume = nand_resume; 6297 mtd->_reboot = nand_shutdown; 6298 mtd->_block_isreserved = nand_block_isreserved; 6299 mtd->_block_isbad = nand_block_isbad; 6300 mtd->_block_markbad = nand_block_markbad; 6301 mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; 6302 6303 /* 6304 * Initialize bitflip_threshold to its default prior scan_bbt() call. 6305 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be 6306 * properly set. 6307 */ 6308 if (!mtd->bitflip_threshold) 6309 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); 6310 6311 /* Find the fastest data interface for this chip */ 6312 ret = nand_choose_interface_config(chip); 6313 if (ret) 6314 goto err_nanddev_cleanup; 6315 6316 /* Enter fastest possible mode on all dies. */ 6317 for (i = 0; i < nanddev_ntargets(&chip->base); i++) { 6318 ret = nand_setup_interface(chip, i); 6319 if (ret) 6320 goto err_free_interface_config; 6321 } 6322 6323 /* 6324 * Look for secure regions in the NAND chip. These regions are supposed 6325 * to be protected by a secure element like Trustzone. So the read/write 6326 * accesses to these regions will be blocked in the runtime by this 6327 * driver. 6328 */ 6329 ret = of_get_nand_secure_regions(chip); 6330 if (ret) 6331 goto err_free_interface_config; 6332 6333 /* Check, if we should skip the bad block table scan */ 6334 if (chip->options & NAND_SKIP_BBTSCAN) 6335 return 0; 6336 6337 /* Build bad block table */ 6338 ret = nand_create_bbt(chip); 6339 if (ret) 6340 goto err_free_secure_regions; 6341 6342 return 0; 6343 6344 err_free_secure_regions: 6345 kfree(chip->secure_regions); 6346 6347 err_free_interface_config: 6348 kfree(chip->best_interface_config); 6349 6350 err_nanddev_cleanup: 6351 nanddev_cleanup(&chip->base); 6352 6353 err_nand_manuf_cleanup: 6354 nand_manufacturer_cleanup(chip); 6355 6356 err_free_buf: 6357 kfree(chip->data_buf); 6358 kfree(ecc->code_buf); 6359 kfree(ecc->calc_buf); 6360 6361 return ret; 6362 } 6363 6364 static int nand_attach(struct nand_chip *chip) 6365 { 6366 if (chip->controller->ops && chip->controller->ops->attach_chip) 6367 return chip->controller->ops->attach_chip(chip); 6368 6369 return 0; 6370 } 6371 6372 static void nand_detach(struct nand_chip *chip) 6373 { 6374 if (chip->controller->ops && chip->controller->ops->detach_chip) 6375 chip->controller->ops->detach_chip(chip); 6376 } 6377 6378 /** 6379 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device 6380 * @chip: NAND chip object 6381 * @maxchips: number of chips to scan for. 6382 * @ids: optional flash IDs table 6383 * 6384 * This fills out all the uninitialized function pointers with the defaults. 6385 * The flash ID is read and the mtd/chip structures are filled with the 6386 * appropriate values. 6387 */ 6388 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips, 6389 struct nand_flash_dev *ids) 6390 { 6391 int ret; 6392 6393 if (!maxchips) 6394 return -EINVAL; 6395 6396 ret = nand_scan_ident(chip, maxchips, ids); 6397 if (ret) 6398 return ret; 6399 6400 ret = nand_attach(chip); 6401 if (ret) 6402 goto cleanup_ident; 6403 6404 ret = nand_scan_tail(chip); 6405 if (ret) 6406 goto detach_chip; 6407 6408 return 0; 6409 6410 detach_chip: 6411 nand_detach(chip); 6412 cleanup_ident: 6413 nand_scan_ident_cleanup(chip); 6414 6415 return ret; 6416 } 6417 EXPORT_SYMBOL(nand_scan_with_ids); 6418 6419 /** 6420 * nand_cleanup - [NAND Interface] Free resources held by the NAND device 6421 * @chip: NAND chip object 6422 */ 6423 void nand_cleanup(struct nand_chip *chip) 6424 { 6425 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) { 6426 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING) 6427 rawnand_sw_hamming_cleanup(chip); 6428 else if (chip->ecc.algo == NAND_ECC_ALGO_BCH) 6429 rawnand_sw_bch_cleanup(chip); 6430 } 6431 6432 nanddev_cleanup(&chip->base); 6433 6434 /* Free secure regions data */ 6435 kfree(chip->secure_regions); 6436 6437 /* Free bad block table memory */ 6438 kfree(chip->bbt); 6439 kfree(chip->data_buf); 6440 kfree(chip->ecc.code_buf); 6441 kfree(chip->ecc.calc_buf); 6442 6443 /* Free bad block descriptor memory */ 6444 if (chip->badblock_pattern && chip->badblock_pattern->options 6445 & NAND_BBT_DYNAMICSTRUCT) 6446 kfree(chip->badblock_pattern); 6447 6448 /* Free the data interface */ 6449 kfree(chip->best_interface_config); 6450 6451 /* Free manufacturer priv data. */ 6452 nand_manufacturer_cleanup(chip); 6453 6454 /* Free controller specific allocations after chip identification */ 6455 nand_detach(chip); 6456 6457 /* Free identification phase allocations */ 6458 nand_scan_ident_cleanup(chip); 6459 } 6460 6461 EXPORT_SYMBOL_GPL(nand_cleanup); 6462 6463 MODULE_LICENSE("GPL"); 6464 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>"); 6465 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 6466 MODULE_DESCRIPTION("Generic NAND flash driver code"); 6467