// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

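/*
 * Illustrative note (added by the editor, not from the original sources):
 * a caller that wants to flip a single bit in the configuration register
 * would typically do something like
 *
 *	ret = spinand_upd_cfg(spinand, CFG_QUAD_ENABLE, CFG_QUAD_ENABLE);
 *	...
 *	ret = spinand_upd_cfg(spinand, CFG_QUAD_ENABLE, 0);
 *
 * where the mask selects the bits to change and val provides their new
 * value. Reads are served from spinand->cfg_cache and spinand_set_cfg()
 * skips the SET FEATURE command when the value is unchanged, so redundant
 * calls are cheap.
 */
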
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is
 * a NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

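/*
 * Explanatory note (added): both helpers above are thin wrappers around
 * spinand_upd_cfg(). The quad-enable bit is only touched on chips flagged
 * with SPINAND_HAS_QE_BIT, and only set when one of the selected cache op
 * templates actually transfers data on four lines; ECC enable/disable is
 * just the CFG_ECC_ENABLE bit, so for instance
 *
 *	spinand_ecc_enable(spinand, true);
 *
 * ends up as spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE).
 */
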
static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

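/*
 * Worked example (added, assuming a 2048 + 64 byte page): if a request only
 * carries OOB data, the setup code above ends up with
 *
 *	buf    = spinand->oobbuf;
 *	column = 2048;	(nanddev_page_size(nand))
 *	nbytes = 64;	(nanddev_per_page_oobsize(nand))
 *
 * and the dirmap loop then issues as many partial reads as the controller
 * needs, advancing buf and column by the number of bytes actually returned
 * by spi_mem_dirmap_read().
 */
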
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed since
	 * our last check.
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

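/*
 * Illustrative note (added): spinand_lock_block() simply programs the block
 * lock register. The init path below uses it as
 *
 *	spinand_lock_block(spinand, BL_ALL_UNLOCKED);
 *
 * on every target, since the parts power up with all blocks locked.
 */
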
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

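/*
 * Return-value convention (explanatory note, added): like other MTD read
 * paths, spinand_mtd_read() reports the worst-case number of corrected
 * bitflips seen across all pages on success, and it keeps going on ECC
 * failures (counting them in mtd->ecc_stats.failed) so that the data of the
 * pages that did decode is still copied out; -EBADMSG is only returned once
 * the whole request has been attempted.
 */
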
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
		.mode = MTD_OPS_RAW,
	};

	memset(spinand->oobbuf, 0, 2);
	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

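/*
 * Bad-block convention (explanatory note, added): spinand_isbad() above
 * reads the first two OOB bytes of the addressed page with ECC disabled
 * (MTD_OPS_RAW) and treats any value other than 0xff 0xff as a bad block
 * marker; spinand_markbad() below erases the block and then programs
 * 0x00 0x00 into those same bytes.
 */
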
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	spinand_erase_op(spinand, pos);

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

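/*
 * Worked example (added, assuming a 2048-byte page): fls(2048) is 12, so
 * plane 1 gets a dirmap window starting at offset 1 << 12 = 0x1000. The
 * plane bit therefore lands just above the column address bits in the cache
 * read/write commands, which is what the comment in spinand_create_dirmap()
 * describes.
 */
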
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

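/*
 * Illustrative sketch (added; see the manufacturer drivers for the exact
 * macros and parameters): op variant tables are declared with the fastest
 * protocol first, roughly along these lines:
 *
 *	static SPINAND_OP_VARIANTS(read_cache_variants,
 *			SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
 *			SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
 *			SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0));
 *
 * spinand_select_op_variant() then returns the first entry the underlying
 * controller can execute for a full page + OOB transfer, even if that
 * transfer has to be split into several chunks.
 */
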
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID retrieved with the READ_ID command, to match against the
 *	   table entries
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(dev, "%s SPI NAND was found.\n",
		 spinand->manufacturer->name);
	dev_info(dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

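/*
 * Illustrative sketch (added; the foo_* names are hypothetical): a
 * manufacturer ->detect() hook usually checks the manufacturer byte in
 * spinand->id.data (its position depends on the part's READ ID format) and
 * then lets spinand_match_and_init() fill in the geometry and op templates:
 *
 *	static int foo_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[1] != FOO_MANUFACTURER_ID)
 *			return 0;	// not ours, let other drivers try
 *
 *		ret = spinand_match_and_init(spinand, foo_spinand_table,
 *					     ARRAY_SIZE(foo_spinand_table),
 *					     id[2]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;	// match found
 *	}
 *
 * Returning a positive value is what makes spinand_manufacturer_detect()
 * stop iterating and record the manufacturer.
 */
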
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * the buffer passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * If the chip does not provide its own ECC OOB layout, fall back to
	 * a layout where everything but the bad block marker is available
	 * to the user.
	 */
	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

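/*
 * Explanatory note (added): spinand_cleanup() below undoes spinand_init():
 * it tears down the nanddev, runs the manufacturer cleanup hook and frees
 * the two kzalloc()'ed buffers. The cfg_cache, the dirmaps array and the
 * dirmap descriptors are devm-managed and are released together with the
 * underlying SPI device.
 */
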
static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");