/*
 * Overview:
 * This is the generic MTD driver for NAND flash devices. It should be
 * capable of working with almost all NAND chips currently available.
 *
 * Additional technical information is available on
 * http://www.linux-mtd.infradead.org/doc/nand.html
 *
 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 * Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 * TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>

#include "internals.h"

static int nand_get_device(struct mtd_info *mtd, int new_state);

static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops);

/* Define default oob placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		/* First ECC region starts at OOB byte 0 */
		oobregion->offset = 0;
		if (mtd->oobsize == 16)
			oobregion->length = 4;
		else
			oobregion->length = 3;
	} else {
		/* 8-byte OOB devices only have the first ECC region */
		if (mtd->oobsize == 8)
			return -ERANGE;

		oobregion->offset = 6;
		oobregion->length = ecc->total - 4;
	}

	return 0;
}

/* Free (non-ECC) OOB bytes for small page devices. */
static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (mtd->oobsize == 16) {
		if (section)
			return -ERANGE;

		oobregion->length = 8;
		oobregion->offset = 8;
	} else {
		oobregion->length = 2;
		if (!section)
			oobregion->offset = 3;
		else
			oobregion->offset = 6;
	}

	return 0;
}

const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);

/* ECC bytes for large page devices are packed at the end of the OOB area. */
static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section || !ecc->total)
		return -ERANGE;

	oobregion->length = ecc->total;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

/* Free OOB bytes: everything between byte 2 (after BBM) and the ECC area. */
static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = mtd->oobsize - ecc->total - 2;
	oobregion->offset = 2;

	return 0;
}

const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);

/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
 * are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	/* Fixed legacy ECC offsets for 64- and 128-byte OOB areas */
	switch (mtd->oobsize) {
	case 64:
		oobregion->offset = 40;
		break;
	case 128:
		oobregion->offset = 80;
		break;
	default:
		return -EINVAL;
	}

	oobregion->length = ecc->total;
	if (oobregion->offset + oobregion->length > mtd->oobsize)
		return -ERANGE;

	return 0;
}

static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ecc_offset = 0;

	if (section < 0 || section > 1)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		ecc_offset = 40;
		break;
	case 128:
		ecc_offset = 80;
		break;
	default:
		return -EINVAL;
	}

	if (section == 0) {
		/* Free bytes between the BBM (bytes 0-1) and the ECC area */
		oobregion->offset = 2;
		oobregion->length = ecc_offset - 2;
	} else {
		/* Free bytes after the ECC area, up to the end of the OOB */
		oobregion->offset = ecc_offset + ecc->total;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};

/**
 * check_offs_len - check that an erase region is erase-block aligned
 * @mtd: MTD device structure
 * @ofs: offset from device start
 * @len: length of the region
 *
 * Returns 0 when both @ofs and @len are aligned to the physical erase block
 * size, -EINVAL otherwise.
 */
static int check_offs_len(struct mtd_info *mtd,
			  loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}

/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}

/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check, if the block is bad.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int page, page_end, res;
	u8 bad;

	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	/* Optionally check the second page of the block as well */
	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);

	for (; page < page_end; page++) {
		res = chip->ecc.read_oob(chip, page);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;
	}

	return 0;
}

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: access must be 2-byte aligned and 2 bytes wide */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(mtd, ofs, &ops);
		/* Keep the first error but still try the second page */
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}

/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}

/* Check a block's bad block marker, preferring the driver's legacy hook. */
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}

/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 * (1) erase the affected block, to allow OOB marker to be written cleanly
 * (2) write bad block marker to OOB area of affected block (unless flag
 *     NAND_BBT_NO_OOB_BBM is present)
 * (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(mtd, FL_WRITING);
		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(mtd);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}

/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @mtd: MTD device structure
 *
 * Check, if the device is write protected. The function expects, that the
 * device is already selected.
 */
static int nand_check_wp(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status & NAND_STATUS_WP ? 0 : 1;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
434 * @mtd: MTD device structure 435 * @ofs: offset from device start 436 * 437 * Check if the block is marked as reserved. 438 */ 439 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs) 440 { 441 struct nand_chip *chip = mtd_to_nand(mtd); 442 443 if (!chip->bbt) 444 return 0; 445 /* Return info from the table */ 446 return nand_isreserved_bbt(chip, ofs); 447 } 448 449 /** 450 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 451 * @mtd: MTD device structure 452 * @ofs: offset from device start 453 * @allowbbt: 1, if its allowed to access the bbt area 454 * 455 * Check, if the block is bad. Either by reading the bad block table or 456 * calling of the scan function. 457 */ 458 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt) 459 { 460 struct nand_chip *chip = mtd_to_nand(mtd); 461 462 /* Return info from the table */ 463 if (chip->bbt) 464 return nand_isbad_bbt(chip, ofs, allowbbt); 465 466 return nand_isbad_bbm(chip, ofs); 467 } 468 469 /** 470 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1 471 * @chip: NAND chip structure 472 * @timeout_ms: Timeout in ms 473 * 474 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1. 475 * If that does not happen whitin the specified timeout, -ETIMEDOUT is 476 * returned. 477 * 478 * This helper is intended to be used when the controller does not have access 479 * to the NAND R/B pin. 480 * 481 * Be aware that calling this helper from an ->exec_op() implementation means 482 * ->exec_op() must be re-entrant. 483 * 484 * Return 0 if the NAND chip is ready, a negative error otherwise. 485 */ 486 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) 487 { 488 const struct nand_sdr_timings *timings; 489 u8 status = 0; 490 int ret; 491 492 if (!chip->exec_op) 493 return -ENOTSUPP; 494 495 /* Wait tWB before polling the STATUS reg. 
*/ 496 timings = nand_get_sdr_timings(&chip->data_interface); 497 ndelay(PSEC_TO_NSEC(timings->tWB_max)); 498 499 ret = nand_status_op(chip, NULL); 500 if (ret) 501 return ret; 502 503 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms); 504 do { 505 ret = nand_read_data_op(chip, &status, sizeof(status), true); 506 if (ret) 507 break; 508 509 if (status & NAND_STATUS_READY) 510 break; 511 512 /* 513 * Typical lowest execution time for a tR on most NANDs is 10us, 514 * use this as polling delay before doing something smarter (ie. 515 * deriving a delay from the timeout value, timeout_ms/ratio). 516 */ 517 udelay(10); 518 } while (time_before(jiffies, timeout_ms)); 519 520 /* 521 * We have to exit READ_STATUS mode in order to read real data on the 522 * bus in case the WAITRDY instruction is preceding a DATA_IN 523 * instruction. 524 */ 525 nand_exit_status_op(chip); 526 527 if (ret) 528 return ret; 529 530 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT; 531 }; 532 EXPORT_SYMBOL_GPL(nand_soft_waitrdy); 533 534 /** 535 * panic_nand_get_device - [GENERIC] Get chip for selected access 536 * @chip: the nand chip descriptor 537 * @mtd: MTD device structure 538 * @new_state: the state which is requested 539 * 540 * Used when in panic, no locks are taken. 
 */
static void panic_nand_get_device(struct nand_chip *chip,
		      struct mtd_info *mtd, int new_state)
{
	/* Hardware controller shared among independent devices */
	chip->controller->active = chip;
	chip->state = new_state;
}

/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Get the device and lock it for exclusive access
 */
static int
nand_get_device(struct mtd_info *mtd, int new_state)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	spinlock_t *lock = &chip->controller->lock;
	wait_queue_head_t *wq = &chip->controller->wq;
	DECLARE_WAITQUEUE(wait, current);
retry:
	spin_lock(lock);

	/* Hardware controller shared among independent devices */
	if (!chip->controller->active)
		chip->controller->active = chip;

	if (chip->controller->active == chip && chip->state == FL_READY) {
		chip->state = new_state;
		spin_unlock(lock);
		return 0;
	}
	if (new_state == FL_PM_SUSPENDED) {
		/* A suspend request may piggyback on an already suspended owner */
		if (chip->controller->active->state == FL_PM_SUSPENDED) {
			chip->state = FL_PM_SUSPENDED;
			spin_unlock(lock);
			return 0;
		}
	}
	/* Device busy: sleep on the controller waitqueue and retry */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(wq, &wait);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
	goto retry;
}

/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	/* Poll roughly once per millisecond, up to @timeo iterations */
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}

static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.get_feature_list));
}

static bool nand_supports_set_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.set_feature_list));
}

/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	int ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}

/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	if (!chip->setup_data_interface)
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		chip->select_chip(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		chip->select_chip(chip, -1);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	chip->select_chip(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	chip->select_chip(chip, -1);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	chip->select_chip(chip, chipnr);
	nand_reset_op(chip);
	chip->select_chip(chip, -1);

	return ret;
}

/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	int modes, mode, ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	if (chip->parameters.onfi) {
		modes = chip->parameters.onfi->async_timing_mode;
	} else {
		if (!chip->onfi_timing_mode_default)
			return 0;

		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	/* Try the fastest advertised mode first, then fall back */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->setup_data_interface(chip,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	return 0;
}

/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}

/* Issue a READ PAGE on a small page device through ->exec_op(). */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Pick the read command matching the targeted area of the page */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/* Issue a READ PAGE on a large page device through ->exec_op(). */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
944 * 945 * Returns 0 on success, a negative error code otherwise. 946 */ 947 int nand_read_page_op(struct nand_chip *chip, unsigned int page, 948 unsigned int offset_in_page, void *buf, unsigned int len) 949 { 950 struct mtd_info *mtd = nand_to_mtd(chip); 951 952 if (len && !buf) 953 return -EINVAL; 954 955 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 956 return -EINVAL; 957 958 if (chip->exec_op) { 959 if (mtd->writesize > 512) 960 return nand_lp_exec_read_page_op(chip, page, 961 offset_in_page, buf, 962 len); 963 964 return nand_sp_exec_read_page_op(chip, page, offset_in_page, 965 buf, len); 966 } 967 968 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page); 969 if (len) 970 chip->legacy.read_buf(chip, buf, len); 971 972 return 0; 973 } 974 EXPORT_SYMBOL_GPL(nand_read_page_op); 975 976 /** 977 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation 978 * @chip: The NAND chip 979 * @page: parameter page to read 980 * @buf: buffer used to store the data 981 * @len: length of the buffer 982 * 983 * This function issues a READ PARAMETER PAGE operation. 984 * This function does not select/unselect the CS line. 985 * 986 * Returns 0 on success, a negative error code otherwise. 987 */ 988 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf, 989 unsigned int len) 990 { 991 unsigned int i; 992 u8 *p = buf; 993 994 if (len && !buf) 995 return -EINVAL; 996 997 if (chip->exec_op) { 998 const struct nand_sdr_timings *sdr = 999 nand_get_sdr_timings(&chip->data_interface); 1000 struct nand_op_instr instrs[] = { 1001 NAND_OP_CMD(NAND_CMD_PARAM, 0), 1002 NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)), 1003 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max), 1004 PSEC_TO_NSEC(sdr->tRR_min)), 1005 NAND_OP_8BIT_DATA_IN(len, buf, 0), 1006 }; 1007 struct nand_operation op = NAND_OPERATION(instrs); 1008 1009 /* Drop the DATA_IN instruction if len is set to 0. 
*/ 1010 if (!len) 1011 op.ninstrs--; 1012 1013 return nand_exec_op(chip, &op); 1014 } 1015 1016 chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1); 1017 for (i = 0; i < len; i++) 1018 p[i] = chip->legacy.read_byte(chip); 1019 1020 return 0; 1021 } 1022 1023 /** 1024 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation 1025 * @chip: The NAND chip 1026 * @offset_in_page: offset within the page 1027 * @buf: buffer used to store the data 1028 * @len: length of the buffer 1029 * @force_8bit: force 8-bit bus access 1030 * 1031 * This function issues a CHANGE READ COLUMN operation. 1032 * This function does not select/unselect the CS line. 1033 * 1034 * Returns 0 on success, a negative error code otherwise. 1035 */ 1036 int nand_change_read_column_op(struct nand_chip *chip, 1037 unsigned int offset_in_page, void *buf, 1038 unsigned int len, bool force_8bit) 1039 { 1040 struct mtd_info *mtd = nand_to_mtd(chip); 1041 1042 if (len && !buf) 1043 return -EINVAL; 1044 1045 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1046 return -EINVAL; 1047 1048 /* Small page NANDs do not support column change. */ 1049 if (mtd->writesize <= 512) 1050 return -ENOTSUPP; 1051 1052 if (chip->exec_op) { 1053 const struct nand_sdr_timings *sdr = 1054 nand_get_sdr_timings(&chip->data_interface); 1055 u8 addrs[2] = {}; 1056 struct nand_op_instr instrs[] = { 1057 NAND_OP_CMD(NAND_CMD_RNDOUT, 0), 1058 NAND_OP_ADDR(2, addrs, 0), 1059 NAND_OP_CMD(NAND_CMD_RNDOUTSTART, 1060 PSEC_TO_NSEC(sdr->tCCS_min)), 1061 NAND_OP_DATA_IN(len, buf, 0), 1062 }; 1063 struct nand_operation op = NAND_OPERATION(instrs); 1064 int ret; 1065 1066 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1067 if (ret < 0) 1068 return ret; 1069 1070 /* Drop the DATA_IN instruction if len is set to 0. 
*/ 1071 if (!len) 1072 op.ninstrs--; 1073 1074 instrs[3].ctx.data.force_8bit = force_8bit; 1075 1076 return nand_exec_op(chip, &op); 1077 } 1078 1079 chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1); 1080 if (len) 1081 chip->legacy.read_buf(chip, buf, len); 1082 1083 return 0; 1084 } 1085 EXPORT_SYMBOL_GPL(nand_change_read_column_op); 1086 1087 /** 1088 * nand_read_oob_op - Do a READ OOB operation 1089 * @chip: The NAND chip 1090 * @page: page to read 1091 * @offset_in_oob: offset within the OOB area 1092 * @buf: buffer used to store the data 1093 * @len: length of the buffer 1094 * 1095 * This function issues a READ OOB operation. 1096 * This function does not select/unselect the CS line. 1097 * 1098 * Returns 0 on success, a negative error code otherwise. 1099 */ 1100 int nand_read_oob_op(struct nand_chip *chip, unsigned int page, 1101 unsigned int offset_in_oob, void *buf, unsigned int len) 1102 { 1103 struct mtd_info *mtd = nand_to_mtd(chip); 1104 1105 if (len && !buf) 1106 return -EINVAL; 1107 1108 if (offset_in_oob + len > mtd->oobsize) 1109 return -EINVAL; 1110 1111 if (chip->exec_op) 1112 return nand_read_page_op(chip, page, 1113 mtd->writesize + offset_in_oob, 1114 buf, len); 1115 1116 chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page); 1117 if (len) 1118 chip->legacy.read_buf(chip, buf, len); 1119 1120 return 0; 1121 } 1122 EXPORT_SYMBOL_GPL(nand_read_oob_op); 1123 1124 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, 1125 unsigned int offset_in_page, const void *buf, 1126 unsigned int len, bool prog) 1127 { 1128 struct mtd_info *mtd = nand_to_mtd(chip); 1129 const struct nand_sdr_timings *sdr = 1130 nand_get_sdr_timings(&chip->data_interface); 1131 u8 addrs[5] = {}; 1132 struct nand_op_instr instrs[] = { 1133 /* 1134 * The first instruction will be dropped if we're dealing 1135 * with a large page NAND and adjusted if we're dealing 1136 * with a small page NAND and the page offset is > 255. 
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row cycles after the column cycles filled above. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	/* Patch the real cycle count into the ADDR instruction (index 2). */
	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	/* When only beginning the program (!prog), stop after the data out. */
	if (!prog || ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* Return the raw status byte; the caller checks NAND_STATUS_FAIL. */
	return status;
}

/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* prog=false: issue SEQIN + address + data but no PAGEPROG. */
	if (chip->exec_op)
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);

/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the status byte to learn the program result. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* ->waitfunc() returns the status byte or a negative errno. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);

/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
1296 */ 1297 int nand_prog_page_op(struct nand_chip *chip, unsigned int page, 1298 unsigned int offset_in_page, const void *buf, 1299 unsigned int len) 1300 { 1301 struct mtd_info *mtd = nand_to_mtd(chip); 1302 int status; 1303 1304 if (!len || !buf) 1305 return -EINVAL; 1306 1307 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1308 return -EINVAL; 1309 1310 if (chip->exec_op) { 1311 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf, 1312 len, true); 1313 } else { 1314 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, 1315 page); 1316 chip->legacy.write_buf(chip, buf, len); 1317 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1); 1318 status = chip->legacy.waitfunc(chip); 1319 } 1320 1321 if (status & NAND_STATUS_FAIL) 1322 return -EIO; 1323 1324 return 0; 1325 } 1326 EXPORT_SYMBOL_GPL(nand_prog_page_op); 1327 1328 /** 1329 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation 1330 * @chip: The NAND chip 1331 * @offset_in_page: offset within the page 1332 * @buf: buffer containing the data to send to the NAND 1333 * @len: length of the buffer 1334 * @force_8bit: force 8-bit bus access 1335 * 1336 * This function issues a CHANGE WRITE COLUMN operation. 1337 * This function does not select/unselect the CS line. 1338 * 1339 * Returns 0 on success, a negative error code otherwise. 1340 */ 1341 int nand_change_write_column_op(struct nand_chip *chip, 1342 unsigned int offset_in_page, 1343 const void *buf, unsigned int len, 1344 bool force_8bit) 1345 { 1346 struct mtd_info *mtd = nand_to_mtd(chip); 1347 1348 if (len && !buf) 1349 return -EINVAL; 1350 1351 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1352 return -EINVAL; 1353 1354 /* Small page NANDs do not support column change. 
 */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);
		int ret;

		/* Fill both column cycles; large page implies 2 cycles. */
		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);

/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf;

	if (len && !buf)
		return -EINVAL;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			/* ID bytes are always transferred on an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);

/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Only issue the command if the caller doesn't want the byte. */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);

/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}

/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
1513 */ 1514 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) 1515 { 1516 unsigned int page = eraseblock << 1517 (chip->phys_erase_shift - chip->page_shift); 1518 int ret; 1519 u8 status; 1520 1521 if (chip->exec_op) { 1522 const struct nand_sdr_timings *sdr = 1523 nand_get_sdr_timings(&chip->data_interface); 1524 u8 addrs[3] = { page, page >> 8, page >> 16 }; 1525 struct nand_op_instr instrs[] = { 1526 NAND_OP_CMD(NAND_CMD_ERASE1, 0), 1527 NAND_OP_ADDR(2, addrs, 0), 1528 NAND_OP_CMD(NAND_CMD_ERASE2, 1529 PSEC_TO_MSEC(sdr->tWB_max)), 1530 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0), 1531 }; 1532 struct nand_operation op = NAND_OPERATION(instrs); 1533 1534 if (chip->options & NAND_ROW_ADDR_3) 1535 instrs[1].ctx.addr.naddrs++; 1536 1537 ret = nand_exec_op(chip, &op); 1538 if (ret) 1539 return ret; 1540 1541 ret = nand_status_op(chip, &status); 1542 if (ret) 1543 return ret; 1544 } else { 1545 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page); 1546 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1); 1547 1548 ret = chip->legacy.waitfunc(chip); 1549 if (ret < 0) 1550 return ret; 1551 1552 status = ret; 1553 } 1554 1555 if (status & NAND_STATUS_FAIL) 1556 return -EIO; 1557 1558 return 0; 1559 } 1560 EXPORT_SYMBOL_GPL(nand_erase_op); 1561 1562 /** 1563 * nand_set_features_op - Do a SET FEATURES operation 1564 * @chip: The NAND chip 1565 * @feature: feature id 1566 * @data: 4 bytes of data 1567 * 1568 * This function sends a SET FEATURES command and waits for the NAND to be 1569 * ready before returning. 1570 * This function does not select/unselect the CS line. 1571 * 1572 * Returns 0 on success, a negative error code otherwise. 
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			/* Parameters are always sent on an 8-bit bus. */
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* ->waitfunc() returns the status byte or a negative errno. */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Parameters are always read on an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}

/*
 * Wait for the chip to become ready again (R/B# or timeout-based).
 * @timeout_ms: maximum time to wait, in milliseconds
 * @delay_ns: minimum delay before sampling ready/busy, in nanoseconds
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			/*
			 * NOTE(review): timeout_ms/delay_ns are already in
			 * ms/ns per the parameter names, yet they are run
			 * through the picosecond converters here (which
			 * divide by 10^9). Everywhere else in this file
			 * PSEC_TO_* is applied to picosecond timing fields.
			 * Looks like a unit bug that truncates the timeout
			 * to ~0 — confirm against the callers before fixing.
			 */
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}

/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: no explicit wait, ->cmdfunc() handles the delay. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);

/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		/* Byte-wise access to keep the transfer on 8 bits. */
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);

/**
 * nand_write_data_op - Write data from the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send on the bus
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_write_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		/* Byte-wise access to keep the transfer on 8 bits. */
		const u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);

/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};

/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: pointer to the instruction to check
 * @start_offset: this is an in/out parameter. If @instr has already been
 *		  split, then @start_offset is the offset from which to start
 *		  (either an address cycle or an offset in the data buffer).
 *		  Conversely, if the function returns true (ie. instr must be
 *		  split), this parameter is updated to point to the first
 *		  data/address cycle that has not been taken care of.
 *
 * Some NAND controllers are limited and cannot send X address cycles with a
 * unique operation, or cannot read/write more than Y bytes at the same time.
 * In this case, split the instruction that does not fit in a single
 * controller-operation into two or more chunks.
 *
 * Returns true if the instruction must be split, false otherwise.
 * The @start_offset parameter is also updated to the offset at which the next
 * bundle of instruction must start (if an address or a data instruction).
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit", never split. */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit", never split. */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		/* CMD and WAIT_RDY instructions are never split. */
		break;
	}

	return false;
}

/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false ortherwise. When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		/* Only the first instruction of a subop can start mid-way. */
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Dump the current sub-operation, marking its instructions with "->". */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("%sCMD      [0x%02x]\n", prefix,
				 instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			/* Cap the hexdump at 64 address cycles. */
			pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
				 instr->ctx.addr.naddrs,
				 instr->ctx.addr.naddrs < 64 ?
				 instr->ctx.addr.naddrs : 64,
				 instr->ctx.addr.addrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_WAITRDY_INSTR:
			pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
				 instr->ctx.waitrdy.timeout_ms);
			break;
		}

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif

/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the operation one sub-operation at a time. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		int ret;

		for (i = 0; i < parser->npatterns; i++) {
			const struct nand_op_parser_pattern *pattern;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &ctx))
				continue;

			nand_op_parser_trace(&ctx);

			if (check_only)
				break;

			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;

			break;
		}

		/* No pattern matched: the controller can't do this op. */
		if (i == parser->npatterns) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * If the last instruction was split, it must be re-examined
		 * as the first instruction of the next subop.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);

/* True if @instr is a DATA_IN or DATA_OUT instruction (NULL-safe). */
static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
			 instr->type == NAND_OP_DATA_OUT_INSTR);
}

/* True if @instr_idx addresses an existing instruction of @subop. */
static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
				      unsigned int instr_idx)
{
	return subop && instr_idx < subop->ninstrs;
}

/*
 * Only the first instruction of a subop may have been split and thus start
 * at a non-zero offset; all others start at 0.
 */
static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
					     unsigned int instr_idx)
{
	if (instr_idx)
		return 0;

	return subop->first_instr_start_off;
}

/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to issue.
 */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);

/**
 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of a data instruction. This is wrong as instructions
 * might be split.
 *
 * Given an address instruction, returns the number of address cycle to issue.
 */
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int instr_idx)
{
	int start_off, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	start_off = nand_subop_get_addr_start_off(subop, instr_idx);

	/* Only the last instruction of a subop may have been cut short. */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);

/**
 * nand_subop_get_data_start_off - Get the start offset in a data array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->buf.{in,out} field of data instructions. This is wrong as data
 * instructions might be split.
 *
 * Given a data instruction, returns the offset to start from.
 */
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);

/**
 * nand_subop_get_data_len - Get the number of bytes to retrieve
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->len field of a data instruction. This is wrong as data instructions
 * might be split.
 *
 * Returns the length of the chunk of data to send/receive.
 */
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int instr_idx)
{
	int start_off = 0, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	start_off = nand_subop_get_data_start_off(subop, instr_idx);

	/* Only the last instruction of a subop may have been cut short. */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.data.len;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);

/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_data_interface for details), do the reset operation, and
 * apply back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
2215 */ 2216 int nand_reset(struct nand_chip *chip, int chipnr) 2217 { 2218 struct nand_data_interface saved_data_intf = chip->data_interface; 2219 int ret; 2220 2221 ret = nand_reset_data_interface(chip, chipnr); 2222 if (ret) 2223 return ret; 2224 2225 /* 2226 * The CS line has to be released before we can apply the new NAND 2227 * interface settings, hence this weird ->select_chip() dance. 2228 */ 2229 chip->select_chip(chip, chipnr); 2230 ret = nand_reset_op(chip); 2231 chip->select_chip(chip, -1); 2232 if (ret) 2233 return ret; 2234 2235 /* 2236 * A nand_reset_data_interface() put both the NAND chip and the NAND 2237 * controller in timings mode 0. If the default mode for this chip is 2238 * also 0, no need to proceed to the change again. Plus, at probe time, 2239 * nand_setup_data_interface() uses ->set/get_features() which would 2240 * fail anyway as the parameter page is not available yet. 2241 */ 2242 if (!chip->onfi_timing_mode_default) 2243 return 0; 2244 2245 chip->data_interface = saved_data_intf; 2246 ret = nand_setup_data_interface(chip, chipnr); 2247 if (ret) 2248 return ret; 2249 2250 return 0; 2251 } 2252 EXPORT_SYMBOL_GPL(nand_reset); 2253 2254 /** 2255 * nand_get_features - wrapper to perform a GET_FEATURE 2256 * @chip: NAND chip info structure 2257 * @addr: feature address 2258 * @subfeature_param: the subfeature parameters, a four bytes array 2259 * 2260 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2261 * operation cannot be handled. 
2262 */ 2263 int nand_get_features(struct nand_chip *chip, int addr, 2264 u8 *subfeature_param) 2265 { 2266 if (!nand_supports_get_features(chip, addr)) 2267 return -ENOTSUPP; 2268 2269 if (chip->legacy.get_features) 2270 return chip->legacy.get_features(chip, addr, subfeature_param); 2271 2272 return nand_get_features_op(chip, addr, subfeature_param); 2273 } 2274 2275 /** 2276 * nand_set_features - wrapper to perform a SET_FEATURE 2277 * @chip: NAND chip info structure 2278 * @addr: feature address 2279 * @subfeature_param: the subfeature parameters, a four bytes array 2280 * 2281 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2282 * operation cannot be handled. 2283 */ 2284 int nand_set_features(struct nand_chip *chip, int addr, 2285 u8 *subfeature_param) 2286 { 2287 if (!nand_supports_set_features(chip, addr)) 2288 return -ENOTSUPP; 2289 2290 if (chip->legacy.set_features) 2291 return chip->legacy.set_features(chip, addr, subfeature_param); 2292 2293 return nand_set_features_op(chip, addr, subfeature_param); 2294 } 2295 2296 /** 2297 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data 2298 * @buf: buffer to test 2299 * @len: buffer length 2300 * @bitflips_threshold: maximum number of bitflips 2301 * 2302 * Check if a buffer contains only 0xff, which means the underlying region 2303 * has been erased and is ready to be programmed. 2304 * The bitflips_threshold specify the maximum number of bitflips before 2305 * considering the region is not erased. 2306 * Note: The logic of this function has been extracted from the memweight 2307 * implementation, except that nand_check_erased_buf function exit before 2308 * testing the whole buffer if the number of bitflips exceed the 2309 * bitflips_threshold value. 2310 * 2311 * Returns a positive number of bitflips less than or equal to 2312 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2313 * threshold. 
2314 */ 2315 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) 2316 { 2317 const unsigned char *bitmap = buf; 2318 int bitflips = 0; 2319 int weight; 2320 2321 for (; len && ((uintptr_t)bitmap) % sizeof(long); 2322 len--, bitmap++) { 2323 weight = hweight8(*bitmap); 2324 bitflips += BITS_PER_BYTE - weight; 2325 if (unlikely(bitflips > bitflips_threshold)) 2326 return -EBADMSG; 2327 } 2328 2329 for (; len >= sizeof(long); 2330 len -= sizeof(long), bitmap += sizeof(long)) { 2331 unsigned long d = *((unsigned long *)bitmap); 2332 if (d == ~0UL) 2333 continue; 2334 weight = hweight_long(d); 2335 bitflips += BITS_PER_LONG - weight; 2336 if (unlikely(bitflips > bitflips_threshold)) 2337 return -EBADMSG; 2338 } 2339 2340 for (; len > 0; len--, bitmap++) { 2341 weight = hweight8(*bitmap); 2342 bitflips += BITS_PER_BYTE - weight; 2343 if (unlikely(bitflips > bitflips_threshold)) 2344 return -EBADMSG; 2345 } 2346 2347 return bitflips; 2348 } 2349 2350 /** 2351 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only 2352 * 0xff data 2353 * @data: data buffer to test 2354 * @datalen: data length 2355 * @ecc: ECC buffer 2356 * @ecclen: ECC length 2357 * @extraoob: extra OOB buffer 2358 * @extraooblen: extra OOB length 2359 * @bitflips_threshold: maximum number of bitflips 2360 * 2361 * Check if a data buffer and its associated ECC and OOB data contains only 2362 * 0xff pattern, which means the underlying region has been erased and is 2363 * ready to be programmed. 2364 * The bitflips_threshold specify the maximum number of bitflips before 2365 * considering the region as not erased. 2366 * 2367 * Note: 2368 * 1/ ECC algorithms are working on pre-defined block sizes which are usually 2369 * different from the NAND page size. When fixing bitflips, ECC engines will 2370 * report the number of errors per chunk, and the NAND core infrastructure 2371 * expect you to return the maximum number of bitflips for the whole page. 
2372 * This is why you should always use this function on a single chunk and 2373 * not on the whole page. After checking each chunk you should update your 2374 * max_bitflips value accordingly. 2375 * 2/ When checking for bitflips in erased pages you should not only check 2376 * the payload data but also their associated ECC data, because a user might 2377 * have programmed almost all bits to 1 but a few. In this case, we 2378 * shouldn't consider the chunk as erased, and checking ECC bytes prevent 2379 * this case. 2380 * 3/ The extraoob argument is optional, and should be used if some of your OOB 2381 * data are protected by the ECC engine. 2382 * It could also be used if you support subpages and want to attach some 2383 * extra OOB data to an ECC chunk. 2384 * 2385 * Returns a positive number of bitflips less than or equal to 2386 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2387 * threshold. In case of success, the passed buffers are filled with 0xff. 2388 */ 2389 int nand_check_erased_ecc_chunk(void *data, int datalen, 2390 void *ecc, int ecclen, 2391 void *extraoob, int extraooblen, 2392 int bitflips_threshold) 2393 { 2394 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; 2395 2396 data_bitflips = nand_check_erased_buf(data, datalen, 2397 bitflips_threshold); 2398 if (data_bitflips < 0) 2399 return data_bitflips; 2400 2401 bitflips_threshold -= data_bitflips; 2402 2403 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); 2404 if (ecc_bitflips < 0) 2405 return ecc_bitflips; 2406 2407 bitflips_threshold -= ecc_bitflips; 2408 2409 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, 2410 bitflips_threshold); 2411 if (extraoob_bitflips < 0) 2412 return extraoob_bitflips; 2413 2414 if (data_bitflips) 2415 memset(data, 0xff, datalen); 2416 2417 if (ecc_bitflips) 2418 memset(ecc, 0xff, ecclen); 2419 2420 if (extraoob_bitflips) 2421 memset(extraoob, 0xff, extraooblen); 2422 2423 return data_bitflips + 
ecc_bitflips + extraoob_bitflips; 2424 } 2425 EXPORT_SYMBOL(nand_check_erased_ecc_chunk); 2426 2427 /** 2428 * nand_read_page_raw_notsupp - dummy read raw page function 2429 * @chip: nand chip info structure 2430 * @buf: buffer to store read data 2431 * @oob_required: caller requires OOB data read to chip->oob_poi 2432 * @page: page number to read 2433 * 2434 * Returns -ENOTSUPP unconditionally. 2435 */ 2436 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, 2437 int oob_required, int page) 2438 { 2439 return -ENOTSUPP; 2440 } 2441 2442 /** 2443 * nand_read_page_raw - [INTERN] read raw page data without ecc 2444 * @chip: nand chip info structure 2445 * @buf: buffer to store read data 2446 * @oob_required: caller requires OOB data read to chip->oob_poi 2447 * @page: page number to read 2448 * 2449 * Not for syndrome calculating ECC controllers, which use a special oob layout. 2450 */ 2451 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, 2452 int page) 2453 { 2454 struct mtd_info *mtd = nand_to_mtd(chip); 2455 int ret; 2456 2457 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize); 2458 if (ret) 2459 return ret; 2460 2461 if (oob_required) { 2462 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, 2463 false); 2464 if (ret) 2465 return ret; 2466 } 2467 2468 return 0; 2469 } 2470 EXPORT_SYMBOL(nand_read_page_raw); 2471 2472 /** 2473 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc 2474 * @chip: nand chip info structure 2475 * @buf: buffer to store read data 2476 * @oob_required: caller requires OOB data read to chip->oob_poi 2477 * @page: page number to read 2478 * 2479 * We need a special oob layout and handling even when OOB isn't used. 
2480 */ 2481 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, 2482 int oob_required, int page) 2483 { 2484 struct mtd_info *mtd = nand_to_mtd(chip); 2485 int eccsize = chip->ecc.size; 2486 int eccbytes = chip->ecc.bytes; 2487 uint8_t *oob = chip->oob_poi; 2488 int steps, size, ret; 2489 2490 ret = nand_read_page_op(chip, page, 0, NULL, 0); 2491 if (ret) 2492 return ret; 2493 2494 for (steps = chip->ecc.steps; steps > 0; steps--) { 2495 ret = nand_read_data_op(chip, buf, eccsize, false); 2496 if (ret) 2497 return ret; 2498 2499 buf += eccsize; 2500 2501 if (chip->ecc.prepad) { 2502 ret = nand_read_data_op(chip, oob, chip->ecc.prepad, 2503 false); 2504 if (ret) 2505 return ret; 2506 2507 oob += chip->ecc.prepad; 2508 } 2509 2510 ret = nand_read_data_op(chip, oob, eccbytes, false); 2511 if (ret) 2512 return ret; 2513 2514 oob += eccbytes; 2515 2516 if (chip->ecc.postpad) { 2517 ret = nand_read_data_op(chip, oob, chip->ecc.postpad, 2518 false); 2519 if (ret) 2520 return ret; 2521 2522 oob += chip->ecc.postpad; 2523 } 2524 } 2525 2526 size = mtd->oobsize - (oob - chip->oob_poi); 2527 if (size) { 2528 ret = nand_read_data_op(chip, oob, size, false); 2529 if (ret) 2530 return ret; 2531 } 2532 2533 return 0; 2534 } 2535 2536 /** 2537 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function 2538 * @chip: nand chip info structure 2539 * @buf: buffer to store read data 2540 * @oob_required: caller requires OOB data read to chip->oob_poi 2541 * @page: page number to read 2542 */ 2543 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf, 2544 int oob_required, int page) 2545 { 2546 struct mtd_info *mtd = nand_to_mtd(chip); 2547 int i, eccsize = chip->ecc.size, ret; 2548 int eccbytes = chip->ecc.bytes; 2549 int eccsteps = chip->ecc.steps; 2550 uint8_t *p = buf; 2551 uint8_t *ecc_calc = chip->ecc.calc_buf; 2552 uint8_t *ecc_code = chip->ecc.code_buf; 2553 unsigned int max_bitflips = 0; 2554 2555 
chip->ecc.read_page_raw(chip, buf, 1, page); 2556 2557 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 2558 chip->ecc.calculate(chip, p, &ecc_calc[i]); 2559 2560 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 2561 chip->ecc.total); 2562 if (ret) 2563 return ret; 2564 2565 eccsteps = chip->ecc.steps; 2566 p = buf; 2567 2568 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2569 int stat; 2570 2571 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 2572 if (stat < 0) { 2573 mtd->ecc_stats.failed++; 2574 } else { 2575 mtd->ecc_stats.corrected += stat; 2576 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2577 } 2578 } 2579 return max_bitflips; 2580 } 2581 2582 /** 2583 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function 2584 * @chip: nand chip info structure 2585 * @data_offs: offset of requested data within the page 2586 * @readlen: data length 2587 * @bufpoi: buffer to store read data 2588 * @page: page number to read 2589 */ 2590 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs, 2591 uint32_t readlen, uint8_t *bufpoi, int page) 2592 { 2593 struct mtd_info *mtd = nand_to_mtd(chip); 2594 int start_step, end_step, num_steps, ret; 2595 uint8_t *p; 2596 int data_col_addr, i, gaps = 0; 2597 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 2598 int busw = (chip->options & NAND_BUSWIDTH_16) ? 
2 : 1; 2599 int index, section = 0; 2600 unsigned int max_bitflips = 0; 2601 struct mtd_oob_region oobregion = { }; 2602 2603 /* Column address within the page aligned to ECC size (256bytes) */ 2604 start_step = data_offs / chip->ecc.size; 2605 end_step = (data_offs + readlen - 1) / chip->ecc.size; 2606 num_steps = end_step - start_step + 1; 2607 index = start_step * chip->ecc.bytes; 2608 2609 /* Data size aligned to ECC ecc.size */ 2610 datafrag_len = num_steps * chip->ecc.size; 2611 eccfrag_len = num_steps * chip->ecc.bytes; 2612 2613 data_col_addr = start_step * chip->ecc.size; 2614 /* If we read not a page aligned data */ 2615 p = bufpoi + data_col_addr; 2616 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len); 2617 if (ret) 2618 return ret; 2619 2620 /* Calculate ECC */ 2621 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 2622 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]); 2623 2624 /* 2625 * The performance is faster if we position offsets according to 2626 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 2627 */ 2628 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion); 2629 if (ret) 2630 return ret; 2631 2632 if (oobregion.length < eccfrag_len) 2633 gaps = 1; 2634 2635 if (gaps) { 2636 ret = nand_change_read_column_op(chip, mtd->writesize, 2637 chip->oob_poi, mtd->oobsize, 2638 false); 2639 if (ret) 2640 return ret; 2641 } else { 2642 /* 2643 * Send the command to read the particular ECC bytes take care 2644 * about buswidth alignment in read_buf. 
2645 */ 2646 aligned_pos = oobregion.offset & ~(busw - 1); 2647 aligned_len = eccfrag_len; 2648 if (oobregion.offset & (busw - 1)) 2649 aligned_len++; 2650 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) & 2651 (busw - 1)) 2652 aligned_len++; 2653 2654 ret = nand_change_read_column_op(chip, 2655 mtd->writesize + aligned_pos, 2656 &chip->oob_poi[aligned_pos], 2657 aligned_len, false); 2658 if (ret) 2659 return ret; 2660 } 2661 2662 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf, 2663 chip->oob_poi, index, eccfrag_len); 2664 if (ret) 2665 return ret; 2666 2667 p = bufpoi + data_col_addr; 2668 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { 2669 int stat; 2670 2671 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i], 2672 &chip->ecc.calc_buf[i]); 2673 if (stat == -EBADMSG && 2674 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 2675 /* check for empty pages with bitflips */ 2676 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 2677 &chip->ecc.code_buf[i], 2678 chip->ecc.bytes, 2679 NULL, 0, 2680 chip->ecc.strength); 2681 } 2682 2683 if (stat < 0) { 2684 mtd->ecc_stats.failed++; 2685 } else { 2686 mtd->ecc_stats.corrected += stat; 2687 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2688 } 2689 } 2690 return max_bitflips; 2691 } 2692 2693 /** 2694 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 2695 * @chip: nand chip info structure 2696 * @buf: buffer to store read data 2697 * @oob_required: caller requires OOB data read to chip->oob_poi 2698 * @page: page number to read 2699 * 2700 * Not for syndrome calculating ECC controllers which need a special oob layout. 
2701 */ 2702 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf, 2703 int oob_required, int page) 2704 { 2705 struct mtd_info *mtd = nand_to_mtd(chip); 2706 int i, eccsize = chip->ecc.size, ret; 2707 int eccbytes = chip->ecc.bytes; 2708 int eccsteps = chip->ecc.steps; 2709 uint8_t *p = buf; 2710 uint8_t *ecc_calc = chip->ecc.calc_buf; 2711 uint8_t *ecc_code = chip->ecc.code_buf; 2712 unsigned int max_bitflips = 0; 2713 2714 ret = nand_read_page_op(chip, page, 0, NULL, 0); 2715 if (ret) 2716 return ret; 2717 2718 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2719 chip->ecc.hwctl(chip, NAND_ECC_READ); 2720 2721 ret = nand_read_data_op(chip, p, eccsize, false); 2722 if (ret) 2723 return ret; 2724 2725 chip->ecc.calculate(chip, p, &ecc_calc[i]); 2726 } 2727 2728 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false); 2729 if (ret) 2730 return ret; 2731 2732 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 2733 chip->ecc.total); 2734 if (ret) 2735 return ret; 2736 2737 eccsteps = chip->ecc.steps; 2738 p = buf; 2739 2740 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2741 int stat; 2742 2743 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 2744 if (stat == -EBADMSG && 2745 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 2746 /* check for empty pages with bitflips */ 2747 stat = nand_check_erased_ecc_chunk(p, eccsize, 2748 &ecc_code[i], eccbytes, 2749 NULL, 0, 2750 chip->ecc.strength); 2751 } 2752 2753 if (stat < 0) { 2754 mtd->ecc_stats.failed++; 2755 } else { 2756 mtd->ecc_stats.corrected += stat; 2757 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2758 } 2759 } 2760 return max_bitflips; 2761 } 2762 2763 /** 2764 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first 2765 * @chip: nand chip info structure 2766 * @buf: buffer to store read data 2767 * @oob_required: caller requires OOB data read to chip->oob_poi 2768 * @page: page number to 
read 2769 * 2770 * Hardware ECC for large page chips, require OOB to be read first. For this 2771 * ECC mode, the write_page method is re-used from ECC_HW. These methods 2772 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with 2773 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from 2774 * the data area, by overwriting the NAND manufacturer bad block markings. 2775 */ 2776 static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, 2777 int oob_required, int page) 2778 { 2779 struct mtd_info *mtd = nand_to_mtd(chip); 2780 int i, eccsize = chip->ecc.size, ret; 2781 int eccbytes = chip->ecc.bytes; 2782 int eccsteps = chip->ecc.steps; 2783 uint8_t *p = buf; 2784 uint8_t *ecc_code = chip->ecc.code_buf; 2785 uint8_t *ecc_calc = chip->ecc.calc_buf; 2786 unsigned int max_bitflips = 0; 2787 2788 /* Read the OOB area first */ 2789 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); 2790 if (ret) 2791 return ret; 2792 2793 ret = nand_read_page_op(chip, page, 0, NULL, 0); 2794 if (ret) 2795 return ret; 2796 2797 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 2798 chip->ecc.total); 2799 if (ret) 2800 return ret; 2801 2802 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2803 int stat; 2804 2805 chip->ecc.hwctl(chip, NAND_ECC_READ); 2806 2807 ret = nand_read_data_op(chip, p, eccsize, false); 2808 if (ret) 2809 return ret; 2810 2811 chip->ecc.calculate(chip, p, &ecc_calc[i]); 2812 2813 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); 2814 if (stat == -EBADMSG && 2815 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 2816 /* check for empty pages with bitflips */ 2817 stat = nand_check_erased_ecc_chunk(p, eccsize, 2818 &ecc_code[i], eccbytes, 2819 NULL, 0, 2820 chip->ecc.strength); 2821 } 2822 2823 if (stat < 0) { 2824 mtd->ecc_stats.failed++; 2825 } else { 2826 mtd->ecc_stats.corrected += stat; 2827 max_bitflips = max_t(unsigned int, max_bitflips, 
stat); 2828 } 2829 } 2830 return max_bitflips; 2831 } 2832 2833 /** 2834 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read 2835 * @chip: nand chip info structure 2836 * @buf: buffer to store read data 2837 * @oob_required: caller requires OOB data read to chip->oob_poi 2838 * @page: page number to read 2839 * 2840 * The hw generator calculates the error syndrome automatically. Therefore we 2841 * need a special oob layout and handling. 2842 */ 2843 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, 2844 int oob_required, int page) 2845 { 2846 struct mtd_info *mtd = nand_to_mtd(chip); 2847 int ret, i, eccsize = chip->ecc.size; 2848 int eccbytes = chip->ecc.bytes; 2849 int eccsteps = chip->ecc.steps; 2850 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad; 2851 uint8_t *p = buf; 2852 uint8_t *oob = chip->oob_poi; 2853 unsigned int max_bitflips = 0; 2854 2855 ret = nand_read_page_op(chip, page, 0, NULL, 0); 2856 if (ret) 2857 return ret; 2858 2859 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2860 int stat; 2861 2862 chip->ecc.hwctl(chip, NAND_ECC_READ); 2863 2864 ret = nand_read_data_op(chip, p, eccsize, false); 2865 if (ret) 2866 return ret; 2867 2868 if (chip->ecc.prepad) { 2869 ret = nand_read_data_op(chip, oob, chip->ecc.prepad, 2870 false); 2871 if (ret) 2872 return ret; 2873 2874 oob += chip->ecc.prepad; 2875 } 2876 2877 chip->ecc.hwctl(chip, NAND_ECC_READSYN); 2878 2879 ret = nand_read_data_op(chip, oob, eccbytes, false); 2880 if (ret) 2881 return ret; 2882 2883 stat = chip->ecc.correct(chip, p, oob, NULL); 2884 2885 oob += eccbytes; 2886 2887 if (chip->ecc.postpad) { 2888 ret = nand_read_data_op(chip, oob, chip->ecc.postpad, 2889 false); 2890 if (ret) 2891 return ret; 2892 2893 oob += chip->ecc.postpad; 2894 } 2895 2896 if (stat == -EBADMSG && 2897 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 2898 /* check for empty pages with bitflips */ 2899 stat = 
nand_check_erased_ecc_chunk(p, chip->ecc.size, 2900 oob - eccpadbytes, 2901 eccpadbytes, 2902 NULL, 0, 2903 chip->ecc.strength); 2904 } 2905 2906 if (stat < 0) { 2907 mtd->ecc_stats.failed++; 2908 } else { 2909 mtd->ecc_stats.corrected += stat; 2910 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2911 } 2912 } 2913 2914 /* Calculate remaining oob bytes */ 2915 i = mtd->oobsize - (oob - chip->oob_poi); 2916 if (i) { 2917 ret = nand_read_data_op(chip, oob, i, false); 2918 if (ret) 2919 return ret; 2920 } 2921 2922 return max_bitflips; 2923 } 2924 2925 /** 2926 * nand_transfer_oob - [INTERN] Transfer oob to client buffer 2927 * @mtd: mtd info structure 2928 * @oob: oob destination address 2929 * @ops: oob ops structure 2930 * @len: size of oob to transfer 2931 */ 2932 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob, 2933 struct mtd_oob_ops *ops, size_t len) 2934 { 2935 struct nand_chip *chip = mtd_to_nand(mtd); 2936 int ret; 2937 2938 switch (ops->mode) { 2939 2940 case MTD_OPS_PLACE_OOB: 2941 case MTD_OPS_RAW: 2942 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 2943 return oob + len; 2944 2945 case MTD_OPS_AUTO_OOB: 2946 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi, 2947 ops->ooboffs, len); 2948 BUG_ON(ret); 2949 return oob + len; 2950 2951 default: 2952 BUG(); 2953 } 2954 return NULL; 2955 } 2956 2957 /** 2958 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode 2959 * @chip: NAND chip object 2960 * @retry_mode: the retry mode to use 2961 * 2962 * Some vendors supply a special command to shift the Vt threshold, to be used 2963 * when there are too many bitflips in a page (i.e., ECC error). After setting 2964 * a new threshold, the host should retry reading the page. 
 */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	/* Modes are numbered 0..read_retries-1; reject out-of-range requests. */
	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	if (!chip->setup_read_retry)
		return -EOPNOTSUPP;

	return chip->setup_read_retry(chip, retry_mode);
}

/* Wait tR_max for R/B on chips that need it after a read (NAND_NEED_READRDY). */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr;

	if (!(chip->options & NAND_NEED_READRDY))
		return;

	sdr = nand_get_sdr_timings(&chip->data_interface);
	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
}

/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bufpoi;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot so per-page new failures can be detected below. */
		unsigned int ecc_failures = mtd->ecc_stats.failed;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Bounce through chip->data_buf for partial pages, or for
		 * caller buffers that are unsuitable for DMA.
		 */
		if (!aligned)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagebuf || oob) {
			bufpoi = use_bufpoi ? chip->data_buf : buf;

			if (use_bufpoi && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bufpoi)
					/* Invalidate page cache */
					chip->pagebuf = -1;
				break;
			}

			/* Transfer not aligned data */
			if (use_bufpoi) {
				/*
				 * Only cache clean, full-ECC non-raw reads;
				 * anything else would serve stale/partial
				 * data on the next cache hit.
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_failures) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagebuf = realpage;
					chip->pagebuf_bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagebuf = -1;
				}
				memcpy(buf, chip->data_buf + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(mtd,
						oob, ops, toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/* New ECC failures on this page: walk the retry modes. */
			if (mtd->ecc_stats.failed - ecc_failures) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset failures; retry */
					mtd->ecc_stats.failed = ecc_failures;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page cache hit: serve from chip->data_buf. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagebuf_bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(chip, -1);
			chip->select_chip(chip, chipnr);
		}
	}
	chip->select_chip(chip, -1);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @chip: nand chip info structure
 * @page: page number to read
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);

/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @chip: nand chip info
 *	    structure
 * @page: page number to read
 */
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int length = mtd->oobsize;
	/* One "chunk" = prepad + ECC bytes + postpad for a single ECC step. */
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos, ret;

	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		/* First iteration reads from the initial position (sndrnd=0). */
		if (sndrnd) {
			int ret;

			/* Seek past this step's data to its OOB chunk. */
			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				ret = nand_change_read_column_op(chip, pos,
								 NULL, 0,
								 false);
			else
				ret = nand_read_page_op(chip, page, pos, NULL,
							0);

			if (ret)
				return ret;
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);

		ret = nand_read_data_op(chip, bufpoi, toread, false);
		if (ret)
			return ret;

		bufpoi += toread;
		length -= toread;
	}
	/* Remaining OOB bytes after the last interleaved chunk. */
	if (length > 0) {
		ret = nand_read_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);

/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: OOB sits after everything; write it in one go. */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		/* First iteration writes from the initial position (sndcmd=0). */
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips cannot reposition the
				 * write column: pad the data area with 0xff
				 * (a no-op for NAND programming) instead.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Remaining OOB bytes after the last interleaved chunk. */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 */
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot the ECC stats so new failures can be detected at the end. */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(mtd, buf, ops, len);

		nand_wait_readrdy(chip);

		/* A non-negative ret is the bitflip count for this page. */
		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(chip, -1);
			chip->select_chip(chip, chipnr);
		}
	}
	chip->select_chip(chip, -1);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	/* Report uncorrectable errors that occurred during this read. */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * NAND read data and/or out-of-band data.
3404 */ 3405 static int nand_read_oob(struct mtd_info *mtd, loff_t from, 3406 struct mtd_oob_ops *ops) 3407 { 3408 int ret; 3409 3410 ops->retlen = 0; 3411 3412 if (ops->mode != MTD_OPS_PLACE_OOB && 3413 ops->mode != MTD_OPS_AUTO_OOB && 3414 ops->mode != MTD_OPS_RAW) 3415 return -ENOTSUPP; 3416 3417 nand_get_device(mtd, FL_READING); 3418 3419 if (!ops->datbuf) 3420 ret = nand_do_read_oob(mtd, from, ops); 3421 else 3422 ret = nand_do_read_ops(mtd, from, ops); 3423 3424 nand_release_device(mtd); 3425 return ret; 3426 } 3427 3428 /** 3429 * nand_write_page_raw_notsupp - dummy raw page write function 3430 * @chip: nand chip info structure 3431 * @buf: data buffer 3432 * @oob_required: must write chip->oob_poi to OOB 3433 * @page: page number to write 3434 * 3435 * Returns -ENOTSUPP unconditionally. 3436 */ 3437 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf, 3438 int oob_required, int page) 3439 { 3440 return -ENOTSUPP; 3441 } 3442 3443 /** 3444 * nand_write_page_raw - [INTERN] raw page write function 3445 * @chip: nand chip info structure 3446 * @buf: data buffer 3447 * @oob_required: must write chip->oob_poi to OOB 3448 * @page: page number to write 3449 * 3450 * Not for syndrome calculating ECC controllers, which use a special oob layout. 
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* The OOB bytes directly follow the data area on the flash. */
	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);

/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleave data chunks with prepad/ECC/postpad OOB chunks. */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write whatever free OOB bytes remain after the last chunk. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the computed ECC bytes at the layout's ECC positions. */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* ECC is in oob_poi now, so a raw write (with OOB) does the rest. */
	return chip->ecc.write_page_raw(chip, buf, 1, page);
}

/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Let the controller compute ECC while each chunk streams out. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}


/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}


/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Stream data/prepad/ECC/postpad per step, ECC computed on the fly. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page - write one page
 * @mtd: MTD device structure
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of actual data to be written
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @raw: use _raw version of write_page
 */
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			   uint32_t offset, int data_len, const uint8_t *buf,
			   int oob_required, int page, int raw)
{
	int status, subpage;

	/* Use the subpage path only when the chip and driver support it. */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
	    chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);
	else
		subpage = 0;

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(chip, buf, oob_required,
						  page);
	else if (subpage)
		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
						 oob_required, page);
	else
		status = chip->ecc.write_page(chip, buf, oob_required, page);

	if (status < 0)
		return status;

	return 0;
}

/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @mtd: MTD device structure
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 */
static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Caller-specified raw placement at ooboffs. */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Scatter the bytes into the layout's free regions. */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, realpage, page, column;
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	chip->select_chip(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
		chip->pagebuf = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bufpoi;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Partial writes must go through the 0xFF-padded bounce
		 * buffer; so must unmapped or misaligned caller buffers when
		 * the controller requires DMA-able memory.
		 */
		if (part_pagewr)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Partial page write?, or need to use bounce buffer */
		if (use_bufpoi) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			/* Bounce buffer reuses the page cache: invalidate. */
			chip->pagebuf = -1;
			memset(chip->data_buf, 0xff, mtd->writesize);
			memcpy(&chip->data_buf[column], buf, bytes);
			wbuf = chip->data_buf;
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(mtd, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(mtd, chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(chip, -1);
			chip->select_chip(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	chip->select_chip(chip, -1);
	return ret;
}

/**
 * panic_nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC. Used when performing writes in interrupt context, this
 * may for example be called by mtdoops when writing an oops while in panic.
 */
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const uint8_t *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(to >> chip->chip_shift);
	struct mtd_oob_ops ops;
	int ret;

	/* Grab the device */
	panic_nand_get_device(chip, mtd, FL_WRITING);

	chip->select_chip(chip, chipnr);

	/* Wait for the device to get ready */
	panic_nand_wait(chip, 400);

	memset(&ops, 0, sizeof(ops));
	ops.len = len;
	ops.datbuf = (uint8_t *)buf;
	ops.mode = MTD_OPS_PLACE_OOB;

	ret = nand_do_write_ops(mtd, to, &ops);

	*retlen = ops.retlen;
	return ret;
}

/**
 * nand_do_write_oob - [INTERN] NAND write out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, page, status, len;
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999. dwmw2.
	 */
	nand_reset(chip, chipnr);

	chip->select_chip(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		chip->select_chip(chip, -1);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagebuf)
		chip->pagebuf = -1;

	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	chip->select_chip(chip, -1);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}

/**
 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	int ret = -ENOTSUPP;

	ops->retlen = 0;

	nand_get_device(mtd, FL_WRITING);

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;

	default:
		goto out;
	}

	/* With a data buffer this is a data+OOB write, otherwise OOB only. */
	if (!ops->datbuf)
		ret = nand_do_write_oob(mtd, to, ops);
	else
		ret = nand_do_write_ops(mtd, to, ops);

out:
	nand_release_device(mtd);
	return ret;
}

/**
 * single_erase - [GENERIC] NAND standard block erase command function
 * @chip: NAND chip object
 * @page: the page address of the block which will be erased
 *
 * Standard erase command for NAND chips. Returns NAND status.
 */
static int single_erase(struct nand_chip *chip, int page)
{
	unsigned int eraseblock;

	/* Send commands to erase a block */
	eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);

	return nand_erase_op(chip, eraseblock);
}

/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}

/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int page, status, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(mtd, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_ERASING);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	chip->select_chip(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(mtd, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagebuf && chip->pagebuf <
		    (page + pages_per_block))
			chip->pagebuf = -1;

		/* Prefer the driver's legacy erase hook when it exists. */
		if (chip->legacy.erase)
			status = chip->legacy.erase(chip,
						    page & chip->pagemask);
		else
			status = single_erase(chip, page & chip->pagemask);

		/* See if block erase succeeded */
		if (status) {
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			ret = -EIO;
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			chip->select_chip(chip, -1);
			chip->select_chip(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	chip->select_chip(chip, -1);
	nand_release_device(mtd);

	/* Return more or less happy */
	return ret;
}

/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_SYNCING);
	/* Release it and go back */
	nand_release_device(mtd);
}

/**
 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);
	int ret;

	/* Select the NAND device */
	nand_get_device(mtd, FL_READING);
	chip->select_chip(chip, chipnr);

	ret = nand_block_checkbad(mtd, offs, 0);

	chip->select_chip(chip, -1);
	nand_release_device(mtd);

	return ret;
}

/**
 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int ret;

	ret = nand_block_isbad(mtd, ofs);
	if (ret) {
		/* If it was bad already, return success and do nothing */
		if (ret > 0)
			return 0;
		return ret;
	}

	return nand_block_markbad_lowlevel(mtd, ofs);
}

/**
 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 * @len: length of mtd
 */
static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u32 part_start_block;
	u32 part_end_block;
	u32 part_start_die;
	u32 part_end_die;

	/*
	 * max_bb_per_die and blocks_per_die used to determine
	 * the maximum bad block count.
	 */
	if (!chip->max_bb_per_die || !chip->blocks_per_die)
		return -ENOTSUPP;

	/* Get the start and end of the partition in erase blocks. */
	part_start_block = mtd_div_by_eb(ofs, mtd);
	part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;

	/* Get the start and end LUNs of the partition. */
	part_start_die = part_start_block / chip->blocks_per_die;
	part_end_die = part_end_block / chip->blocks_per_die;

	/*
	 * Look up the bad blocks per unit and multiply by the number of units
	 * that the partition spans.
	 */
	return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
}

/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 */
static int nand_suspend(struct mtd_info *mtd)
{
	return nand_get_device(mtd, FL_PM_SUSPENDED);
}

/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (chip->state == FL_PM_SUSPENDED)
		nand_release_device(mtd);
	else
		pr_err("%s called for a chip which is not in suspended state\n",
			__func__);
}

/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	/* Held forever: the device never becomes available again. */
	nand_get_device(mtd, FL_PM_SUSPENDED);
}

/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	nand_legacy_set_defaults(chip);

	/* Drivers without their own controller share the dummy one. */
	if (!chip->controller) {
		chip->controller = &chip->dummy_controller;
		nand_controller_init(chip->controller);
	}

	if (!chip->buf_align)
		chip->buf_align = 1;
}

/* Sanitize ONFI strings so we can safely print them */
void sanitize_string(uint8_t *s, size_t len)
{
	ssize_t i;

	/* Null terminate */
	s[len - 1] = 0;

	/* Remove non printable chars */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
			s[i] = '?';
	}

	/* Remove trailing spaces */
	strim(s);
}

/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
 *
 * Check if an ID string is repeated within a given sequence of bytes at
 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
 * if the repetition has a period of @period; otherwise, returns zero.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int i, j;
	for (i = 0; i < period; i++)
		for (j = i + period; j < arrlen; j += period)
			if (id_data[i] != id_data[j])
				return 0;
	return 1;
}

/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the length of the ID string, according to known wraparound/trailing
 * zero patterns. If no pattern exists, returns the length of the array.
4417 */ 4418 static int nand_id_len(u8 *id_data, int arrlen) 4419 { 4420 int last_nonzero, period; 4421 4422 /* Find last non-zero byte */ 4423 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--) 4424 if (id_data[last_nonzero]) 4425 break; 4426 4427 /* All zeros */ 4428 if (last_nonzero < 0) 4429 return 0; 4430 4431 /* Calculate wraparound period */ 4432 for (period = 1; period < arrlen; period++) 4433 if (nand_id_has_period(id_data, arrlen, period)) 4434 break; 4435 4436 /* There's a repeated pattern */ 4437 if (period < arrlen) 4438 return period; 4439 4440 /* There are trailing zeros */ 4441 if (last_nonzero < arrlen - 1) 4442 return last_nonzero + 1; 4443 4444 /* No pattern detected */ 4445 return arrlen; 4446 } 4447 4448 /* Extract the bits of per cell from the 3rd byte of the extended ID */ 4449 static int nand_get_bits_per_cell(u8 cellinfo) 4450 { 4451 int bits; 4452 4453 bits = cellinfo & NAND_CI_CELLTYPE_MSK; 4454 bits >>= NAND_CI_CELLTYPE_SHIFT; 4455 return bits + 1; 4456 } 4457 4458 /* 4459 * Many new NAND share similar device ID codes, which represent the size of the 4460 * chip. The rest of the parameters must be decoded according to generic or 4461 * manufacturer-specific "extended ID" decoding patterns. 4462 */ 4463 void nand_decode_ext_id(struct nand_chip *chip) 4464 { 4465 struct mtd_info *mtd = nand_to_mtd(chip); 4466 int extid; 4467 u8 *id_data = chip->id.data; 4468 /* The 3rd id byte holds MLC / multichip data */ 4469 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]); 4470 /* The 4th id byte is the important one */ 4471 extid = id_data[3]; 4472 4473 /* Calc pagesize */ 4474 mtd->writesize = 1024 << (extid & 0x03); 4475 extid >>= 2; 4476 /* Calc oobsize */ 4477 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); 4478 extid >>= 2; 4479 /* Calc blocksize. 
Blocksize is multiples of 64KiB */ 4480 mtd->erasesize = (64 * 1024) << (extid & 0x03); 4481 extid >>= 2; 4482 /* Get buswidth information */ 4483 if (extid & 0x1) 4484 chip->options |= NAND_BUSWIDTH_16; 4485 } 4486 EXPORT_SYMBOL_GPL(nand_decode_ext_id); 4487 4488 /* 4489 * Old devices have chip data hardcoded in the device ID table. nand_decode_id 4490 * decodes a matching ID table entry and assigns the MTD size parameters for 4491 * the chip. 4492 */ 4493 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type) 4494 { 4495 struct mtd_info *mtd = nand_to_mtd(chip); 4496 4497 mtd->erasesize = type->erasesize; 4498 mtd->writesize = type->pagesize; 4499 mtd->oobsize = mtd->writesize / 32; 4500 4501 /* All legacy ID NAND are small-page, SLC */ 4502 chip->bits_per_cell = 1; 4503 } 4504 4505 /* 4506 * Set the bad block marker/indicator (BBM/BBI) patterns according to some 4507 * heuristic patterns using various detected parameters (e.g., manufacturer, 4508 * page size, cell-type information). 
4509 */ 4510 static void nand_decode_bbm_options(struct nand_chip *chip) 4511 { 4512 struct mtd_info *mtd = nand_to_mtd(chip); 4513 4514 /* Set the bad block position */ 4515 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16)) 4516 chip->badblockpos = NAND_LARGE_BADBLOCK_POS; 4517 else 4518 chip->badblockpos = NAND_SMALL_BADBLOCK_POS; 4519 } 4520 4521 static inline bool is_full_id_nand(struct nand_flash_dev *type) 4522 { 4523 return type->id_len; 4524 } 4525 4526 static bool find_full_id_nand(struct nand_chip *chip, 4527 struct nand_flash_dev *type) 4528 { 4529 struct mtd_info *mtd = nand_to_mtd(chip); 4530 u8 *id_data = chip->id.data; 4531 4532 if (!strncmp(type->id, id_data, type->id_len)) { 4533 mtd->writesize = type->pagesize; 4534 mtd->erasesize = type->erasesize; 4535 mtd->oobsize = type->oobsize; 4536 4537 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]); 4538 chip->chipsize = (uint64_t)type->chipsize << 20; 4539 chip->options |= type->options; 4540 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type); 4541 chip->ecc_step_ds = NAND_ECC_STEP(type); 4542 chip->onfi_timing_mode_default = 4543 type->onfi_timing_mode_default; 4544 4545 chip->parameters.model = kstrdup(type->name, GFP_KERNEL); 4546 if (!chip->parameters.model) 4547 return false; 4548 4549 return true; 4550 } 4551 return false; 4552 } 4553 4554 /* 4555 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC 4556 * compliant and does not have a full-id or legacy-id entry in the nand_ids 4557 * table. 4558 */ 4559 static void nand_manufacturer_detect(struct nand_chip *chip) 4560 { 4561 /* 4562 * Try manufacturer detection if available and use 4563 * nand_decode_ext_id() otherwise. 
4564 */ 4565 if (chip->manufacturer.desc && chip->manufacturer.desc->ops && 4566 chip->manufacturer.desc->ops->detect) { 4567 /* The 3rd id byte holds MLC / multichip data */ 4568 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]); 4569 chip->manufacturer.desc->ops->detect(chip); 4570 } else { 4571 nand_decode_ext_id(chip); 4572 } 4573 } 4574 4575 /* 4576 * Manufacturer initialization. This function is called for all NANDs including 4577 * ONFI and JEDEC compliant ones. 4578 * Manufacturer drivers should put all their specific initialization code in 4579 * their ->init() hook. 4580 */ 4581 static int nand_manufacturer_init(struct nand_chip *chip) 4582 { 4583 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops || 4584 !chip->manufacturer.desc->ops->init) 4585 return 0; 4586 4587 return chip->manufacturer.desc->ops->init(chip); 4588 } 4589 4590 /* 4591 * Manufacturer cleanup. This function is called for all NANDs including 4592 * ONFI and JEDEC compliant ones. 4593 * Manufacturer drivers should put all their specific cleanup code in their 4594 * ->cleanup() hook. 4595 */ 4596 static void nand_manufacturer_cleanup(struct nand_chip *chip) 4597 { 4598 /* Release manufacturer private data */ 4599 if (chip->manufacturer.desc && chip->manufacturer.desc->ops && 4600 chip->manufacturer.desc->ops->cleanup) 4601 chip->manufacturer.desc->ops->cleanup(chip); 4602 } 4603 4604 static const char * 4605 nand_manufacturer_name(const struct nand_manufacturer *manufacturer) 4606 { 4607 return manufacturer ? manufacturer->name : "Unknown"; 4608 } 4609 4610 /* 4611 * Get the flash and manufacturer id and lookup if the type is supported. 
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	chip->select_chip(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	/* Trim wraparound/trailing-zero bytes off the raw ID string */
	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* Walk the ID table: full-id entries match the whole ID string */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	/* Not in the table (or legacy entry without geometry): try standards */
	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	/* Freed in nand_scan_ident_cleanup() (or below on bus-width error) */
	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	chip->chipsize = (uint64_t)type->chipsize << 20;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() only sees 32 bits, so handle >4GiB chip sizes in two halves */
	if (chip->chipsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* More than 64K pages per chip needs a third row address cycle */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}

/* DT "nand-ecc-mode" property values, indexed by nand_ecc_modes_t */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};

static int of_get_nand_ecc_mode(struct device_node *np)
{
	const char *pm;
	int err, i;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err < 0)
		return err;

	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
		if (!strcasecmp(pm, nand_ecc_modes[i]))
			return i;

	/*
	 * For backward compatibility we support few obsoleted values that don't
	 * have their mappings into nand_ecc_modes_t anymore (they were merged
	 * with other enums).
	 */
	if (!strcasecmp(pm, "soft_bch"))
		return NAND_ECC_SOFT;

	return -ENODEV;
}

/* DT "nand-ecc-algo" property values, indexed by nand_ecc_algo */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};

static int of_get_nand_ecc_algo(struct device_node *np)
{
	const char *pm;
	int err, i;

	err = of_property_read_string(np, "nand-ecc-algo", &pm);
	if (!err) {
		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
			if (!strcasecmp(pm, nand_ecc_algos[i]))
				return i;
		return -ENODEV;
	}

	/*
	 * For backward compatibility we also read "nand-ecc-mode" checking
	 * for some obsoleted values that were specifying ECC algorithm.
	 */
	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err < 0)
		return err;

	if (!strcasecmp(pm, "soft"))
		return NAND_ECC_HAMMING;
	else if (!strcasecmp(pm, "soft_bch"))
		return NAND_ECC_BCH;

	return -ENODEV;
}

/* Returns the DT-provided ECC step size, or a negative errno if unset */
static int of_get_nand_ecc_step_size(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
	return ret ? ret : val;
}

/* Returns the DT-provided ECC strength, or a negative errno if unset */
static int of_get_nand_ecc_strength(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
	return ret ? ret : val;
}

/* Returns the bus width in bits (default 8), or -EIO on an invalid value */
static int of_get_nand_bus_width(struct device_node *np)
{
	u32 val;

	if (of_property_read_u32(np, "nand-bus-width", &val))
		return 8;

	switch (val) {
	case 8:
	case 16:
		return val;
	default:
		return -EIO;
	}
}

static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}

/*
 * Parse the generic NAND DT properties and fill the corresponding chip
 * options/ECC fields. A missing DT node is not an error.
 */
static int nand_dt_init(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	int ecc_mode, ecc_algo, ecc_strength, ecc_step;

	if (!dn)
		return 0;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	ecc_mode = of_get_nand_ecc_mode(dn);
	ecc_algo = of_get_nand_ecc_algo(dn);
	ecc_strength = of_get_nand_ecc_strength(dn);
	ecc_step = of_get_nand_ecc_step_size(dn);

	/* Only override the driver defaults when the property is present */
	if (ecc_mode >= 0)
		chip->ecc.mode = ecc_mode;

	if (ecc_algo >= 0)
		chip->ecc.algo = ecc_algo;

	if (ecc_strength >= 0)
		chip->ecc.strength = ecc_strength;

	if (ecc_step > 0)
		chip->ecc.size = ecc_step;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		chip->ecc.options |= NAND_ECC_MAXIMIZE;

	return 0;
}

/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	/* Enforce the right timings for reset/detection */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);

	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	if (chip->exec_op && !chip->select_chip) {
		pr_err("->select_chip() is mandatory when implementing ->exec_op()\n");
		return -EINVAL;
	}

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	/* Set the default functions */
	nand_set_defaults(chip);

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		chip->select_chip(chip, -1);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	chip->select_chip(chip, -1);

	/* Check for a chip array: dies answering with the same ID pair */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		nand_reset(chip, i);

		chip->select_chip(chip, i);
		/* Send the command for reading device ID */
		nand_readid_op(chip, 0, id, sizeof(id));
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			chip->select_chip(chip, -1);
			break;
		}
		chip->select_chip(chip, -1);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	chip->numchips = i;
	mtd->size = i * chip->chipsize;

	return 0;
}

/* Free the identification-phase allocations (model string, ONFI page) */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}

/*
 * Hook up the software ECC implementation (Hamming or BCH) selected in
 * ecc->algo. Only valid for NAND_ECC_SOFT mode.
 */
static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate =
nand_bch_calculate_ecc; 5058 ecc->correct = nand_bch_correct_data; 5059 ecc->read_page = nand_read_page_swecc; 5060 ecc->read_subpage = nand_read_subpage; 5061 ecc->write_page = nand_write_page_swecc; 5062 ecc->read_page_raw = nand_read_page_raw; 5063 ecc->write_page_raw = nand_write_page_raw; 5064 ecc->read_oob = nand_read_oob_std; 5065 ecc->write_oob = nand_write_oob_std; 5066 5067 /* 5068 * Board driver should supply ecc.size and ecc.strength 5069 * values to select how many bits are correctable. 5070 * Otherwise, default to 4 bits for large page devices. 5071 */ 5072 if (!ecc->size && (mtd->oobsize >= 64)) { 5073 ecc->size = 512; 5074 ecc->strength = 4; 5075 } 5076 5077 /* 5078 * if no ecc placement scheme was provided pickup the default 5079 * large page one. 5080 */ 5081 if (!mtd->ooblayout) { 5082 /* handle large page devices only */ 5083 if (mtd->oobsize < 64) { 5084 WARN(1, "OOB layout is required when using software BCH on small pages\n"); 5085 return -EINVAL; 5086 } 5087 5088 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 5089 5090 } 5091 5092 /* 5093 * We can only maximize ECC config when the default layout is 5094 * used, otherwise we don't know how many bytes can really be 5095 * used. 5096 */ 5097 if (mtd->ooblayout == &nand_ooblayout_lp_ops && 5098 ecc->options & NAND_ECC_MAXIMIZE) { 5099 int steps, bytes; 5100 5101 /* Always prefer 1k blocks over 512bytes ones */ 5102 ecc->size = 1024; 5103 steps = mtd->writesize / ecc->size; 5104 5105 /* Reserve 2 bytes for the BBM */ 5106 bytes = (mtd->oobsize - 2) / steps; 5107 ecc->strength = bytes * 8 / fls(8 * ecc->size); 5108 } 5109 5110 /* See nand_bch_init() for details. 
*/ 5111 ecc->bytes = 0; 5112 ecc->priv = nand_bch_init(mtd); 5113 if (!ecc->priv) { 5114 WARN(1, "BCH ECC initialization failed!\n"); 5115 return -EINVAL; 5116 } 5117 return 0; 5118 default: 5119 WARN(1, "Unsupported ECC algorithm!\n"); 5120 return -EINVAL; 5121 } 5122 } 5123 5124 /** 5125 * nand_check_ecc_caps - check the sanity of preset ECC settings 5126 * @chip: nand chip info structure 5127 * @caps: ECC caps info structure 5128 * @oobavail: OOB size that the ECC engine can use 5129 * 5130 * When ECC step size and strength are already set, check if they are supported 5131 * by the controller and the calculated ECC bytes fit within the chip's OOB. 5132 * On success, the calculated ECC bytes is set. 5133 */ 5134 static int 5135 nand_check_ecc_caps(struct nand_chip *chip, 5136 const struct nand_ecc_caps *caps, int oobavail) 5137 { 5138 struct mtd_info *mtd = nand_to_mtd(chip); 5139 const struct nand_ecc_step_info *stepinfo; 5140 int preset_step = chip->ecc.size; 5141 int preset_strength = chip->ecc.strength; 5142 int ecc_bytes, nsteps = mtd->writesize / preset_step; 5143 int i, j; 5144 5145 for (i = 0; i < caps->nstepinfos; i++) { 5146 stepinfo = &caps->stepinfos[i]; 5147 5148 if (stepinfo->stepsize != preset_step) 5149 continue; 5150 5151 for (j = 0; j < stepinfo->nstrengths; j++) { 5152 if (stepinfo->strengths[j] != preset_strength) 5153 continue; 5154 5155 ecc_bytes = caps->calc_ecc_bytes(preset_step, 5156 preset_strength); 5157 if (WARN_ON_ONCE(ecc_bytes < 0)) 5158 return ecc_bytes; 5159 5160 if (ecc_bytes * nsteps > oobavail) { 5161 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB", 5162 preset_step, preset_strength); 5163 return -ENOSPC; 5164 } 5165 5166 chip->ecc.bytes = ecc_bytes; 5167 5168 return 0; 5169 } 5170 } 5171 5172 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller", 5173 preset_step, preset_strength); 5174 5175 return -ENOTSUPP; 5176 } 5177 5178 /** 5179 * nand_match_ecc_req - meet the chip's requirement with 
least ECC bytes 5180 * @chip: nand chip info structure 5181 * @caps: ECC engine caps info structure 5182 * @oobavail: OOB size that the ECC engine can use 5183 * 5184 * If a chip's ECC requirement is provided, try to meet it with the least 5185 * number of ECC bytes (i.e. with the largest number of OOB-free bytes). 5186 * On success, the chosen ECC settings are set. 5187 */ 5188 static int 5189 nand_match_ecc_req(struct nand_chip *chip, 5190 const struct nand_ecc_caps *caps, int oobavail) 5191 { 5192 struct mtd_info *mtd = nand_to_mtd(chip); 5193 const struct nand_ecc_step_info *stepinfo; 5194 int req_step = chip->ecc_step_ds; 5195 int req_strength = chip->ecc_strength_ds; 5196 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total; 5197 int best_step, best_strength, best_ecc_bytes; 5198 int best_ecc_bytes_total = INT_MAX; 5199 int i, j; 5200 5201 /* No information provided by the NAND chip */ 5202 if (!req_step || !req_strength) 5203 return -ENOTSUPP; 5204 5205 /* number of correctable bits the chip requires in a page */ 5206 req_corr = mtd->writesize / req_step * req_strength; 5207 5208 for (i = 0; i < caps->nstepinfos; i++) { 5209 stepinfo = &caps->stepinfos[i]; 5210 step_size = stepinfo->stepsize; 5211 5212 for (j = 0; j < stepinfo->nstrengths; j++) { 5213 strength = stepinfo->strengths[j]; 5214 5215 /* 5216 * If both step size and strength are smaller than the 5217 * chip's requirement, it is not easy to compare the 5218 * resulted reliability. 
5219 */ 5220 if (step_size < req_step && strength < req_strength) 5221 continue; 5222 5223 if (mtd->writesize % step_size) 5224 continue; 5225 5226 nsteps = mtd->writesize / step_size; 5227 5228 ecc_bytes = caps->calc_ecc_bytes(step_size, strength); 5229 if (WARN_ON_ONCE(ecc_bytes < 0)) 5230 continue; 5231 ecc_bytes_total = ecc_bytes * nsteps; 5232 5233 if (ecc_bytes_total > oobavail || 5234 strength * nsteps < req_corr) 5235 continue; 5236 5237 /* 5238 * We assume the best is to meet the chip's requrement 5239 * with the least number of ECC bytes. 5240 */ 5241 if (ecc_bytes_total < best_ecc_bytes_total) { 5242 best_ecc_bytes_total = ecc_bytes_total; 5243 best_step = step_size; 5244 best_strength = strength; 5245 best_ecc_bytes = ecc_bytes; 5246 } 5247 } 5248 } 5249 5250 if (best_ecc_bytes_total == INT_MAX) 5251 return -ENOTSUPP; 5252 5253 chip->ecc.size = best_step; 5254 chip->ecc.strength = best_strength; 5255 chip->ecc.bytes = best_ecc_bytes; 5256 5257 return 0; 5258 } 5259 5260 /** 5261 * nand_maximize_ecc - choose the max ECC strength available 5262 * @chip: nand chip info structure 5263 * @caps: ECC engine caps info structure 5264 * @oobavail: OOB size that the ECC engine can use 5265 * 5266 * Choose the max ECC strength that is supported on the controller, and can fit 5267 * within the chip's OOB. On success, the chosen ECC settings are set. 
5268 */ 5269 static int 5270 nand_maximize_ecc(struct nand_chip *chip, 5271 const struct nand_ecc_caps *caps, int oobavail) 5272 { 5273 struct mtd_info *mtd = nand_to_mtd(chip); 5274 const struct nand_ecc_step_info *stepinfo; 5275 int step_size, strength, nsteps, ecc_bytes, corr; 5276 int best_corr = 0; 5277 int best_step = 0; 5278 int best_strength, best_ecc_bytes; 5279 int i, j; 5280 5281 for (i = 0; i < caps->nstepinfos; i++) { 5282 stepinfo = &caps->stepinfos[i]; 5283 step_size = stepinfo->stepsize; 5284 5285 /* If chip->ecc.size is already set, respect it */ 5286 if (chip->ecc.size && step_size != chip->ecc.size) 5287 continue; 5288 5289 for (j = 0; j < stepinfo->nstrengths; j++) { 5290 strength = stepinfo->strengths[j]; 5291 5292 if (mtd->writesize % step_size) 5293 continue; 5294 5295 nsteps = mtd->writesize / step_size; 5296 5297 ecc_bytes = caps->calc_ecc_bytes(step_size, strength); 5298 if (WARN_ON_ONCE(ecc_bytes < 0)) 5299 continue; 5300 5301 if (ecc_bytes * nsteps > oobavail) 5302 continue; 5303 5304 corr = strength * nsteps; 5305 5306 /* 5307 * If the number of correctable bits is the same, 5308 * bigger step_size has more reliability. 5309 */ 5310 if (corr > best_corr || 5311 (corr == best_corr && step_size > best_step)) { 5312 best_corr = corr; 5313 best_step = step_size; 5314 best_strength = strength; 5315 best_ecc_bytes = ecc_bytes; 5316 } 5317 } 5318 } 5319 5320 if (!best_corr) 5321 return -ENOTSUPP; 5322 5323 chip->ecc.size = best_step; 5324 chip->ecc.strength = best_strength; 5325 chip->ecc.bytes = best_ecc_bytes; 5326 5327 return 0; 5328 } 5329 5330 /** 5331 * nand_ecc_choose_conf - Set the ECC strength and ECC step size 5332 * @chip: nand chip info structure 5333 * @caps: ECC engine caps info structure 5334 * @oobavail: OOB size that the ECC engine can use 5335 * 5336 * Choose the ECC configuration according to following logic 5337 * 5338 * 1. 
If both ECC step size and ECC strength are already set (usually by DT) 5339 * then check if it is supported by this controller. 5340 * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength. 5341 * 3. Otherwise, try to match the ECC step size and ECC strength closest 5342 * to the chip's requirement. If available OOB size can't fit the chip 5343 * requirement then fallback to the maximum ECC step size and ECC strength. 5344 * 5345 * On success, the chosen ECC settings are set. 5346 */ 5347 int nand_ecc_choose_conf(struct nand_chip *chip, 5348 const struct nand_ecc_caps *caps, int oobavail) 5349 { 5350 struct mtd_info *mtd = nand_to_mtd(chip); 5351 5352 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize)) 5353 return -EINVAL; 5354 5355 if (chip->ecc.size && chip->ecc.strength) 5356 return nand_check_ecc_caps(chip, caps, oobavail); 5357 5358 if (chip->ecc.options & NAND_ECC_MAXIMIZE) 5359 return nand_maximize_ecc(chip, caps, oobavail); 5360 5361 if (!nand_match_ecc_req(chip, caps, oobavail)) 5362 return 0; 5363 5364 return nand_maximize_ecc(chip, caps, oobavail); 5365 } 5366 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf); 5367 5368 /* 5369 * Check if the chip configuration meet the datasheet requirements. 5370 5371 * If our configuration corrects A bits per B bytes and the minimum 5372 * required correction level is X bits per Y bytes, then we must ensure 5373 * both of the following are true: 5374 * 5375 * (1) A / B >= X / Y 5376 * (2) A >= X 5377 * 5378 * Requirement (1) ensures we can correct for the required bitflip density. 5379 * Requirement (2) ensures we can correct even when all bitflips are clumped 5380 * in the same sector. 
 */
static bool nand_ecc_strength_good(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int corr, ds_corr;

	if (ecc->size == 0 || chip->ecc_step_ds == 0)
		/* Not enough information */
		return true;

	/*
	 * We get the number of corrected bits per page to compare
	 * the correction density.
	 */
	corr = (mtd->writesize * ecc->strength) / ecc->size;
	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;

	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}

/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer holding a full page plus its OOB area */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	chip->select_chip(chip, 0);
	ret = nand_manufacturer_init(chip);
	chip->select_chip(chip, -1);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->mode == NAND_ECC_NONE) {
				mtd_set_ooblayout(mtd,
						  &nand_ooblayout_lp_ops);
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;
		/* fall through - shares the remaining HW ECC defaults */

	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		/* fall through - sanity checks below apply to HW ECC too */

	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;
		/* fall through - continue with the software ECC setup */

	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(mtd);
		if (ret) {
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		break;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers only needed when ECC is computed/corrected */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(mtd))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ?
MTD_CAP_ROM : 5673 MTD_CAP_NANDFLASH; 5674 mtd->_erase = nand_erase; 5675 mtd->_point = NULL; 5676 mtd->_unpoint = NULL; 5677 mtd->_panic_write = panic_nand_write; 5678 mtd->_read_oob = nand_read_oob; 5679 mtd->_write_oob = nand_write_oob; 5680 mtd->_sync = nand_sync; 5681 mtd->_lock = NULL; 5682 mtd->_unlock = NULL; 5683 mtd->_suspend = nand_suspend; 5684 mtd->_resume = nand_resume; 5685 mtd->_reboot = nand_shutdown; 5686 mtd->_block_isreserved = nand_block_isreserved; 5687 mtd->_block_isbad = nand_block_isbad; 5688 mtd->_block_markbad = nand_block_markbad; 5689 mtd->_max_bad_blocks = nand_max_bad_blocks; 5690 mtd->writebufsize = mtd->writesize; 5691 5692 /* 5693 * Initialize bitflip_threshold to its default prior scan_bbt() call. 5694 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be 5695 * properly set. 5696 */ 5697 if (!mtd->bitflip_threshold) 5698 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); 5699 5700 /* Initialize the ->data_interface field. */ 5701 ret = nand_init_data_interface(chip); 5702 if (ret) 5703 goto err_nand_manuf_cleanup; 5704 5705 /* Enter fastest possible mode on all dies. 
*/ 5706 for (i = 0; i < chip->numchips; i++) { 5707 ret = nand_setup_data_interface(chip, i); 5708 if (ret) 5709 goto err_nand_manuf_cleanup; 5710 } 5711 5712 /* Check, if we should skip the bad block table scan */ 5713 if (chip->options & NAND_SKIP_BBTSCAN) 5714 return 0; 5715 5716 /* Build bad block table */ 5717 ret = nand_create_bbt(chip); 5718 if (ret) 5719 goto err_nand_manuf_cleanup; 5720 5721 return 0; 5722 5723 5724 err_nand_manuf_cleanup: 5725 nand_manufacturer_cleanup(chip); 5726 5727 err_free_buf: 5728 kfree(chip->data_buf); 5729 kfree(ecc->code_buf); 5730 kfree(ecc->calc_buf); 5731 5732 return ret; 5733 } 5734 5735 static int nand_attach(struct nand_chip *chip) 5736 { 5737 if (chip->controller->ops && chip->controller->ops->attach_chip) 5738 return chip->controller->ops->attach_chip(chip); 5739 5740 return 0; 5741 } 5742 5743 static void nand_detach(struct nand_chip *chip) 5744 { 5745 if (chip->controller->ops && chip->controller->ops->detach_chip) 5746 chip->controller->ops->detach_chip(chip); 5747 } 5748 5749 /** 5750 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device 5751 * @chip: NAND chip object 5752 * @maxchips: number of chips to scan for. 5753 * @ids: optional flash IDs table 5754 * 5755 * This fills out all the uninitialized function pointers with the defaults. 5756 * The flash ID is read and the mtd/chip structures are filled with the 5757 * appropriate values. 
5758 */ 5759 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips, 5760 struct nand_flash_dev *ids) 5761 { 5762 int ret; 5763 5764 if (!maxchips) 5765 return -EINVAL; 5766 5767 ret = nand_scan_ident(chip, maxchips, ids); 5768 if (ret) 5769 return ret; 5770 5771 ret = nand_attach(chip); 5772 if (ret) 5773 goto cleanup_ident; 5774 5775 ret = nand_scan_tail(chip); 5776 if (ret) 5777 goto detach_chip; 5778 5779 return 0; 5780 5781 detach_chip: 5782 nand_detach(chip); 5783 cleanup_ident: 5784 nand_scan_ident_cleanup(chip); 5785 5786 return ret; 5787 } 5788 EXPORT_SYMBOL(nand_scan_with_ids); 5789 5790 /** 5791 * nand_cleanup - [NAND Interface] Free resources held by the NAND device 5792 * @chip: NAND chip object 5793 */ 5794 void nand_cleanup(struct nand_chip *chip) 5795 { 5796 if (chip->ecc.mode == NAND_ECC_SOFT && 5797 chip->ecc.algo == NAND_ECC_BCH) 5798 nand_bch_free((struct nand_bch_control *)chip->ecc.priv); 5799 5800 /* Free bad block table memory */ 5801 kfree(chip->bbt); 5802 kfree(chip->data_buf); 5803 kfree(chip->ecc.code_buf); 5804 kfree(chip->ecc.calc_buf); 5805 5806 /* Free bad block descriptor memory */ 5807 if (chip->badblock_pattern && chip->badblock_pattern->options 5808 & NAND_BBT_DYNAMICSTRUCT) 5809 kfree(chip->badblock_pattern); 5810 5811 /* Free manufacturer priv data. 
*/ 5812 nand_manufacturer_cleanup(chip); 5813 5814 /* Free controller specific allocations after chip identification */ 5815 nand_detach(chip); 5816 5817 /* Free identification phase allocations */ 5818 nand_scan_ident_cleanup(chip); 5819 } 5820 5821 EXPORT_SYMBOL_GPL(nand_cleanup); 5822 5823 /** 5824 * nand_release - [NAND Interface] Unregister the MTD device and free resources 5825 * held by the NAND device 5826 * @chip: NAND chip object 5827 */ 5828 void nand_release(struct nand_chip *chip) 5829 { 5830 mtd_device_unregister(nand_to_mtd(chip)); 5831 nand_cleanup(chip); 5832 } 5833 EXPORT_SYMBOL_GPL(nand_release); 5834 5835 MODULE_LICENSE("GPL"); 5836 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>"); 5837 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 5838 MODULE_DESCRIPTION("Generic NAND flash driver code"); 5839