/*
 * SuperH FLCTL nand controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};
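
/* Drain both AC FIFOs by pulsing the AC1/AC0 clear bits in FLINTDMACR. */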
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc. */
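		/*
		 * FLADR packing, as programmed below: large-page devices take
		 * the column in bits 0-11 and two page-address bytes in bits
		 * 16-31 (a third page-address byte goes to FLADR2 on devices
		 * larger than 128 MB); small-page devices take the column in
		 * bits 0-7 and the page address in bits 8-31.
		 */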
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* larger than 128 MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
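		/* a count of at least 4 means one 32-bit word can go out */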
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&flctl->dma_complete,
				msecs_to_jiffies(3000));

	if (ret <= 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "DMA transfer timed out\n");
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
		goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
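		/* wait for FIFO space, then feed one big-endian word */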
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
		return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048 bytes */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* here the command actually sent is READ0/READ1/READOOB */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				 uint8_t *buf, int oob_required, int page)
{
	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				  const uint8_t *buf, int oob_required,
				  int page)
{
	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}

static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;
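	/*
	 * Read the page in sector-access mode: 512 data bytes per sector
	 * via the data FIFO, then 16 ECC/OOB bytes per sector via the ECC
	 * FIFO, with 4-symbol ECC correction enabled.
	 */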

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x\n", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			break;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;
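	/* program the 16 OOB bytes of each 512-byte sector individually */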

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set write size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}

static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
			int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
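			/*
			 * issue the stored pointer command (READ0/READ1/
			 * READOOB) with no data so the chip addresses the
			 * right region before the actual SEQIN/PAGEPROG
			 */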
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
	return;
}

static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static uint16_t flctl_read_word(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];

	flctl->index += 2;
	return *buf;
}

static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}

static int flctl_chip_init_tail(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	struct nand_chip *chip = &flctl->chip;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (chip->chipsize > (32 << 20)) {
			/* larger than 32 MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (2 << 16)) {
			/* larger than 128 KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (chip->chipsize > (128 << 20)) {
			/* larger than 128 MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (8 << 16)) {
			/* larger than 512 KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.mode = NAND_ECC_HW;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.algo = NAND_ECC_HAMMING;
	}

	return 0;
}

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}

static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
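	/*
	 * Map the register window; the physical address of FLDTFIFO
	 * (offset 0x24) is kept separately for the DMA slave configuration.
	 */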
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->chip_delay = 20;

	nand->read_byte = flctl_read_byte;
	nand->read_word = flctl_read_word;
	nand->write_buf = flctl_write_buf;
	nand->read_buf = flctl_read_buf;
	nand->select_chip = flctl_select_chip;
	nand->cmdfunc = flctl_cmdfunc;
	nand->set_features = nand_get_set_features_notsupp;
	nand->get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	ret = nand_scan_ident(flctl_mtd, 1, NULL);
	if (ret)
		goto err_chip;

	if (nand->options & NAND_BUSWIDTH_16) {
		/*
		 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
		 * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
		 * flctl->flcmncr_base to pdata->flcmncr_val.
		 */
		pdata->flcmncr_val |= SEL_16BIT;
		flctl->flcmncr_base = pdata->flcmncr_val;
	}

	ret = flctl_chip_init_tail(flctl_mtd);
	if (ret)
		goto err_chip;

	ret = nand_scan_tail(flctl_mtd);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(nand_to_mtd(&flctl->chip));
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove = flctl_remove,
	.driver = {
		.name = "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");