/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

#define MLC_BUFF(x)			(x + 0x00000)
#define MLC_DATA(x)			(x + 0x08000)
#define MLC_CMD(x)			(x + 0x10000)
#define MLC_ADDR(x)			(x + 0x10004)
#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
#define MLC_RPR(x)			(x + 0x10018)
#define MLC_WPR(x)			(x + 0x1001C)
#define MLC_RUBP(x)			(x + 0x10020)
#define MLC_ROBP(x)			(x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
#define MLC_ICR(x)			(x + 0x10030)
#define MLC_TIME_REG(x)			(x + 0x10034)
#define MLC_IRQ_MR(x)			(x + 0x10038)
#define MLC_IRQ_SR(x)			(x + 0x1003C)
#define MLC_LOCK_PR(x)			(x + 0x10044)
#define MLC_ISR(x)			(x + 0x10048)
#define MLC_CEH(x)			(x + 0x1004C)
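/*
 * The offsets above form three windows in the controller's address
 * space: the data buffer at +0x00000 (MLC_BUFF, used for the
 * ECC-assisted subpage transfers below), the raw data port at +0x08000
 * (MLC_DATA, used as IO_ADDR_R/W for plain accesses) and the
 * control/status registers from +0x10000 upwards.
 */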

/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET			0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT			(1 << 3)
#define MLCICR_LARGEBLOCK		(1 << 2)
#define MLCICR_LONGADDR			(1 << 1)
#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY		(1 << 5)
#define MLCIRQ_CONTROLLER_READY		(1 << 4)
#define MLCIRQ_DECODE_FAILURE		(1 << 3)
#define MLCIRQ_DECODE_ERROR		(1 << 2)
#define MLCIRQ_ECC_READY		(1 << 1)
#define MLCIRQ_WRPROT_FAULT		(1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC			0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE		(1 << 6)
#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED		(1 << 3)
#define MLCISR_ECC_READY		(1 << 2)
#define MLCISR_CONTROLLER_READY		(1 << 1)
#define MLCISR_NAND_READY		(1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL			(1 << 0)

struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;
	uint32_t busy_delay;
	uint32_t nand_ta;
	uint32_t rd_high;
	uint32_t rd_low;
	uint32_t wr_high;
	uint32_t wr_low;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};

static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	if (section >= nand_chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
	oobregion->length = nand_chip->ecc.bytes;

	return 0;
}

static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	if (section >= nand_chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = 16 * section;
	oobregion->length = 16 - nand_chip->ecc.bytes;

	return 0;
}

static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};

static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
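/*
 * The absolute BBT page numbers above appear to assume a 1 GiB chip
 * with 2048-byte pages and 64-page blocks (524288 pages in total):
 * 524224 is the first page of the last block and 524160 the first page
 * of the block before it, so the main and mirror tables occupy the two
 * topmost blocks of the device.
 */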
struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct lpc32xx_mlc_platform_data *pdata;
	struct clk		*clk;
	void __iomem		*io_base;
	int			irq;
	struct lpc32xx_nand_cfg_mlc	*ncfg;
	struct completion	comp_nand;
	struct completion	comp_controller;
	uint32_t		llptr;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t		oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t			*oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t		io_base_phy;

	struct completion	comp_dma;
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;
	uint8_t			*dma_buf;
	uint8_t			*dummy_buf;
	int			mlcsubpages; /* number of 512-byte subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * turned out to be significantly slower than a readl() / writel() loop.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * These times cover the transfer itself; in the DMA case, only the
 * wait_for_completion() (DMA setup _not_ included).
 *
 * Note that the 512 bytes subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
 * controller transferring data between its internal buffer to/from the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now. There is
 * no module parameter for it; enabling DMA means changing the initializer
 * below and rebuilding the driver.
 */
static int use_dma;
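/*
 * Worked example for the MLC_TIME_REG setup below (numbers for
 * illustration only; real values come from the board's device tree):
 * the DT encodes each timing parameter as a frequency, i.e. the
 * reciprocal of the required delay. With a 104 MHz MLC clock and
 * nxp,rd-low = <45454545> (a ~22 ns read strobe low time),
 * clkrate / rd_low yields 2 clock cycles. The "+ 1" applied to most
 * fields adds one cycle of margin against rounding down.
 */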
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
	       MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
				  unsigned int ctrl)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, MLC_CMD(host->io_base));
		else
			writel(cmd, MLC_ADDR(host->io_base));
	}
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if ((readb(MLC_ISR(host->io_base)) &
	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
		return 1;

	return 0;
}

static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
	struct lpc32xx_nand_host *host = data;
	uint8_t sr;

	/* Clear interrupt flag by reading status */
	sr = readb(MLC_IRQ_SR(host->io_base));
	if (sr & MLCIRQ_NAND_READY)
		complete(&host->comp_nand);
	if (sr & MLCIRQ_CONTROLLER_READY)
		complete(&host->comp_controller);

	return IRQ_HANDLED;
}

static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
		goto exit;

	wait_for_completion(&host->comp_nand);

	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
		/* Seems to be delayed sometimes by controller */
		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
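/*
 * The controller reports two distinct ready conditions: MLCISR_NAND_READY
 * follows the flash device's R/B# signal, while MLCISR_CONTROLLER_READY
 * indicates that the controller's own state machine (e.g. an auto
 * encode/decode cycle) has finished. The page I/O paths below wait on the
 * controller flag per 512-byte subpage; lpc32xx_waitfunc() waits for both.
 */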
static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
				       struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
		goto exit;

	wait_for_completion(&host->comp_controller);

	while (!(readb(MLC_ISR(host->io_base)) &
		 MLCISR_CONTROLLER_READY)) {
		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	lpc32xx_waitfunc_nand(mtd, chip);
	lpc32xx_waitfunc_controller(mtd, chip);

	return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}

static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
			    enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp_dma);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp_dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
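/*
 * Page read using the hardware's "auto decode" mechanism: for each
 * 512-byte subpage, an auto decode cycle is started, the controller
 * corrects the data in its internal buffer, and the driver then drains
 * 512 data bytes plus 16 OOB bytes from MLC_BUFF, either by DMA or by a
 * readl() loop. Decode status is checked in MLC_ISR per subpage.
 */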
static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;

	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			/* Bits 4..5 of MLC_ISR hold the number of
			 * corrected symbol errors minus one (1..4) */
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}

static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		/* Only 6 of the 16 OOB bytes per subpage are writable;
		 * the controller appends the 10 ECC bytes itself */
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);
	}

	return nand_prog_page_end_op(chip);
}

static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Read whole page - necessary with MLC controller! */
	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

	return 0;
}

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
	return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
	/* Always enabled! */
}
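/*
 * One-time DMA setup: a slave channel is requested via the filter
 * function supplied through platform data (the PL080 on LPC32xx) and
 * configured for 32-bit accesses to the MLC data buffer at MLC_BUFF.
 * Only called from probe() when use_dma is set.
 */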
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}

static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg)
		return NULL;

	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
	of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
	of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
	of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
	of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
	of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

	if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
	    !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
	    !ncfg->wr_low) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}
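/*
 * For reference, a matching device tree node might look like the sketch
 * below. This is an illustration only: the reg/interrupts/gpios
 * specifiers and all timing values are board-specific, and the timing
 * properties are frequencies, i.e. reciprocals of the required delays
 * (checked for non-zero in lpc32xx_parse_dt() above). The first entry
 * of "gpios" names the write protect pin.
 *
 *	flash@200a8000 {
 *		compatible = "nxp,lpc3220-mlc";
 *		reg = <0x200a8000 0x11000>;
 *		interrupts = <...>;
 *		gpios = <...>;
 *		nxp,tcea-delay = <333333333>;
 *		nxp,busy-delay = <10000000>;
 *		nxp,nand-ta = <18181818>;
 *		nxp,rd-high = <31250000>;
 *		nxp,rd-low = <45454545>;
 *		nxp,wr-high = <40000000>;
 *		nxp,wr-low = <83333333>;
 *	};
 */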
/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_phy = rc->start;

	nand_chip = &host->nand_chip;
	mtd = nand_to_mtd(nand_chip);
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
	    gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	/* link the private data structures */
	nand_set_controller_data(nand_chip, host);
	nand_set_flash_node(nand_chip, pdev->dev.of_node);
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto free_gpio;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto put_clk;

	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->dev_ready = lpc32xx_nand_device_ready;
	nand_chip->chip_delay = 25; /* us */
	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
	nand_chip->ecc.read_page = lpc32xx_read_page;
	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_oob = lpc32xx_write_oob;
	nand_chip->ecc.read_oob = lpc32xx_read_oob;
	nand_chip->ecc.strength = 4;
	nand_chip->ecc.bytes = 10;
	nand_chip->waitfunc = lpc32xx_waitfunc;

	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto unprepare_clk;
		}
	}

	/*
	 * Scan to find existence of the device and
	 * get the type of NAND device: SMALL block or LARGE block.
	 */
	res = nand_scan_ident(mtd, 1, NULL);
	if (res)
		goto release_dma_chan;

	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf) {
		res = -ENOMEM;
		goto release_dma_chan;
	}

	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf) {
		res = -ENOMEM;
		goto release_dma_chan;
	}

	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.size = 512;
	mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
	host->mlcsubpages = mtd->writesize / 512;

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		dev_err(&pdev->dev, "failed to get platform irq\n");
		res = -EINVAL;
		goto release_dma_chan;
	}

	if (request_irq(host->irq, lpc3xxx_nand_irq, IRQF_TRIGGER_HIGH,
			DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto release_dma_chan;
	}
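
	/*
	 * The ECC geometry and mlcsubpages above could only be set after
	 * nand_scan_ident() had determined the page size; nand_scan_tail()
	 * below consumes these values.
	 */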
	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 * and scans for a bad block table if appropriate.
	 */
	res = nand_scan_tail(mtd);
	if (res)
		goto free_irq;

	mtd->name = DRV_NAME;

	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (res)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand_chip);
free_irq:
	free_irq(host->irq, host);
release_dma_chan:
	if (use_dma)
		dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
put_clk:
	clk_put(host->clk);
free_gpio:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}

/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);

	nand_release(mtd);
	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable_unprepare(host->clk);
	clk_put(host->clk);

	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	int ret;

	/* Re-enable NAND clock */
	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable_unprepare(host->clk);
	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");