/*
 * SuperH FLCTL NAND controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};
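/*
 * Low-level helpers.  The FLCTL moves data through two FIFOs (FLDTFIFO
 * for page data, FLECFIFO for ECC/OOB data) and executes a command
 * sequence (a "translation") once TRSTRT is written to FLTRCR; the
 * TREND bit signals its completion.  The helpers below clear the FIFOs,
 * start a translation and busy-wait for its end.
 */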
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}
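/*
 * Pack the column and page address into the FLADR register.  Large-page
 * devices (flctl->page_size != 0) take a 12-bit column in the low bits
 * and the page address in the two upper bytes; when a fifth address
 * cycle is needed (rw_ADRCNT == ADRCNT2_E), the topmost page-address
 * byte goes into FLADR2.  Small-page devices use the classic layout of
 * one column byte followed by up to three page-address bytes.
 */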
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* larger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out
	 * the OOB data.  This is the case if either all went fine without
	 * errors, or if the bottom part of the loop corrected the errors
	 * or marked them as uncorrectable and the controller was given
	 * time to push the data into the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}
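/*
 * Wait until the ECC FIFO can take another word: the top byte of
 * FLDTCNTR reflects the FLECFIFO fill state, as in the read case above.
 */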
static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	ret =
	wait_for_completion_timeout(&flctl->dma_complete,
				msecs_to_jiffies(3000));

	if (ret <= 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
		goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}
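/*
 * Write-side mirror of the read helpers: data is converted to the
 * controller's big-endian FIFO format before being pushed out.
 * write_ec_fiforeg() tries DMA first for transfers of 32 bytes or more
 * and falls back to polled I/O when no channel is available or the
 * transfer fails.
 */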
static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
		return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set the SNAND bit if the page size is 2048 bytes */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* the command written to FLCMCDR is READ0, READ1 or READOOB */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}
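/*
 * Hardware-ECC page read: put the controller into sector access mode
 * with 4-symbol correction enabled (ACM_SACCES_MODE | _4ECCCORRECT),
 * then drain each 512-byte sector from the data FIFO and its 16 spare
 * bytes from the ECC FIFO, folding the correction results into
 * mtd->ecc_stats.
 */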
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x\n", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			break;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
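/*
 * OOB-only programming: each sector's 16 spare bytes are written with a
 * separate address setup (column 512 within the 528-byte raw sector)
 * and a separate translation.
 */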
static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set transfer size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}

static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
			int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;
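	/*
	 * NAND_CMD_SEQIN above only latched the target address; the data
	 * itself has been accumulated in done_buff via write_buf().  On
	 * small-page devices the program is preceded by the saved read
	 * command (READ0/READ1/READOOB) to select the right page region.
	 */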
	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
}

static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct nand_chip *chip)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}
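/*
 * Called by nand_scan() once the chip has been identified: derive the
 * number of address cycles for read/write and erase from the page and
 * chip size, and hook up the 4-symbol hardware ECC (10 ECC bytes per
 * 512-byte step, strength 4) when the platform provides it.
 */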
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	/*
	 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
	 * Add the SEL_16BIT flag in flctl->flcmncr_base.
	 */
	if (chip->options & NAND_BUSWIDTH_16)
		flctl->flcmncr_base |= SEL_16BIT;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (chip->chipsize > (32 << 20)) {
			/* larger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (2 << 16)) {
			/* larger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (chip->chipsize > (128 << 20)) {
			/* larger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (8 << 16)) {
			/* larger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.mode = NAND_ECC_HW;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.algo = NAND_ECC_HAMMING;
	}

	return 0;
}

static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
				GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}
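/*
 * Probe: map the controller registers, install the shared FLSTE error
 * interrupt, pick up configuration from DT or platform data, wire up
 * the legacy NAND hooks and optional DMA channels, then scan and
 * register the chip.
 */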
static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(&flctl->chip);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");