// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
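
/*
 * The helper above is the read-modify-write primitive used throughout this
 * file to flip individual configuration bits, for example:
 *
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE, enable ? CFG_ECC_ENABLE : 0);
 *
 * (see spinand_ecc_enable() and spinand_init_quad_enable() below). The
 * register write is skipped by spinand_set_cfg() whenever the cached value
 * already matches.
 */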

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If chip only has one die, this function is a NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}
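
/*
 * spinand_check_ecc_status() returns 0 when no bitflips were detected, a
 * positive number of corrected bitflips (capped at the ECC strength when the
 * chip cannot report an exact count), or -EBADMSG for an uncorrectable error.
 * The on-die ECC engine's ->finish_io_req() hook below turns these values
 * into mtd->ecc_stats updates.
 */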

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};
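
/*
 * The hardcoded free region (offset 2, length 62) assumes the common 64-byte
 * OOB area: bytes 0-1 stay reserved for the bad block marker and bytes 2-63
 * are reported as free. No ECC region is exposed, since this fallback layout
 * is only used when the chip description provides no vendor layout.
 */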

static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG)
		mtd->ecc_stats.failed++;
	else if (ret > 0)
		mtd->ecc_stats.corrected += ret;

	return ret;
}

static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
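
/*
 * The cache read above always goes through the bounce buffers allocated in
 * spinand_init(): spinand->databuf covers the data area and spinand->oobbuf
 * points right behind it, at databuf + page size. The column address passed
 * to the dirmap follows the same layout, so an OOB-only read on a chip with
 * a 2048-byte page and 64-byte OOB, for example, starts at column 2048 and
 * spans columns 2048-2111.
 */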

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
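
/*
 * A single page read is: ECC ->prepare_io_req(), PAGE READ into the on-die
 * cache, wait for the chip to become ready while latching the status byte
 * for the ECC engine, read the cache through the dirmap, and finally let
 * ->finish_io_req() report bitflips. A page write mirrors it: prepare,
 * WRITE ENABLE, PROGRAM LOAD of the whole cache, PROGRAM EXECUTE, then wait
 * and fail with -EIO if STATUS_PROG_FAILED is set.
 */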

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG)
			ecc_failed = true;
		else
			max_bitflips = max_t(unsigned int, max_bitflips, ret);

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}
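
/*
 * One read and one write dirmap descriptor is created per plane, and the
 * plane bit lands just above the column address. With a 2048-byte page, for
 * example, fls(2048) is 12, so plane 1 is mapped at dirmap offset
 * 1 << 12 = 0x1000, right above the columns covering the page and OOB bytes
 * (columns 0-2111 with a 64-byte OOB area).
 */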

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}
	return -ENOTSUPP;
}

static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
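
/*
 * spinand_select_op_variant() walks the variant list in order and returns
 * the first operation the controller can use to transfer a full page plus
 * OOB, possibly split into several chunks via spi_mem_adjust_op_size().
 * NULL means no variant is usable on this controller.
 */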

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}

static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

	spinand_ecc_enable(spinand, false);
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");