/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME	"denali-nand"

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

/* MAP10 commands */
#define DENALI_ERASE		0x01

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * Direct Addressing - the slave address forms the control information (command
 * type, bank, block, and page address).  The slave data is the actual data to
 * be transferred.  This mode requires 28 bits of address region allocated.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - the address translation module intervenes in passing
 * the control information.  This mode reduces the required address range.
 * The control information and transferred data are latched by the registers
 * in the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
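/*
 * Illustrative example (an assumption for clarity, not controller
 * documentation): issuing the command cycle 0x70 (Read Status) to the
 * active bank boils down to
 *
 *	denali->host_write(denali, DENALI_MAP11_CMD | DENALI_BANK(denali), 0x70);
 *
 * With direct addressing this is a single iowrite32() at host + addr; with
 * indexed addressing it becomes two writes, one latching addr into
 * DENALI_INDEXED_CTRL and one carrying the data via DENALI_INDEXED_DATA.
 */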
/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}

static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}
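/*
 * The usual pattern throughout this driver (denali_erase() further below is
 * a concrete user) is:
 *
 *	denali_reset_irq(denali);
 *	<kick the hardware operation>
 *	irq_status = denali_wait_for_irq(denali, <expected INTR__* bits>);
 *
 * denali_reset_irq() must come first so that a stale irq_status left over
 * from a previous operation is not mistaken for completion of the new one.
 */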
static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}

static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	unsigned long flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);
	irq_status = denali->irq_status;
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	return irq_status;
}

static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = denali->host_read(denali, addr);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, addr, buf[i]);
}

static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		buf16[i] = denali->host_read(denali, addr);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		denali->host_write(denali, addr, buf16[i]);
}

static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	denali_read_buf(mtd, &byte, 1);

	return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
	uint16_t word;

	denali_read_buf16(mtd, (uint8_t *)&word, 2);

	return word;
}
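/*
 * An illustrative walk-through (an assumption about a typical call sequence,
 * not a quote from the NAND core): when the core reads the chip ID, it
 * issues a command cycle 0x90 (NAND_CLE set) followed by an address cycle
 * 0x00 (NAND_ALE set) through denali_cmd_ctrl() below, then pulls the ID
 * bytes back one at a time via denali_read_byte().
 */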
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}

static int denali_dev_ready(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return !!(denali_check_irq(denali) & INTR__INT_ACT);
}
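/*
 * Background for the erased-page handling below: an erased NAND page reads
 * back as all 0xff and carries no valid ECC code, so the controller flags
 * it as uncorrectable.  nand_check_erased_ecc_chunk() re-examines such
 * sectors: if the number of zero bits is within chip->ecc.strength, the
 * sector is deemed erased (with the bitflips counted); otherwise it is a
 * genuine ECC failure.
 */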
static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}

static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector.  We can not know how many sectors,
		 * or which sector(s), so the erased-page check must be run
		 * on all of them.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits
	 * in the page, so increase the stats by max_bitflips as a compromise.
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}

static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so we ignore it; there
			 * is no need to correct it.  err_device indicates
			 * which NAND device the error bits belong to when
			 * more than one NAND is connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors have been handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}

static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
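/*
 * Worked example for step 1 above (illustrative figures): for a single-page
 * write, 0x01002000 | (64 << 16) | (1 << 8) | 1 evaluates to 0x01402101,
 * i.e. the burst length, the write flag, and the page count packed into a
 * single descriptor word.
 */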
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}

static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	const uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, *buf32++);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page, raw);
	else
		return denali_pio_read(denali, buf, size, page, raw);
}

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is
		 * asserted when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
		  denali->reg + TRANSFER_SPARE_REG);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}

static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	return nand_prog_page_end_op(chip);
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}

static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first unless this is a full page
	 * transfer.  This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}

static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			     const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
				page, 0, 1);
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali->active_bank = chip;
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	/* R/B# pin transitioned from low to high? */
	irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

	return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	denali_reset_irq(denali);

	denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
			   DENALI_ERASE);

	/* wait for erase to complete or failure to occur */
	irq_status = denali_wait_for_irq(denali,
					 INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core
	 * clock.  clk_x is an integral multiple N of the core clock.  The
	 * value N is configured at IP delivery time; its supported values
	 * are 4, 5, and 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
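/*
 * Worked example for the ACC_CLKS conversion above (illustrative figures,
 * not tied to any particular platform): with clk_x running at 200 MHz,
 * t_x = 5000 ps; ONFI SDR timing mode 0 specifies tREA_max = 40 ns, so
 * acc_clks = DIV_ROUND_UP(40000, 5000) = 8 cycles.
 */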
static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable.  Platforms are allowed
	 * to override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Tell the driver how many bytes the controller skips at the start
	 * of the OOB area before writing the ECC code.  This register may
	 * already have been set by firmware, so read the value out.  If it
	 * is 0, just leave it as is.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	denali_detect_max_banks(denali);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}

int denali_calc_ecc_bytes(int step_size, int strength)
{
	/*
	 * BCH code.  Denali requires ecc.bytes to be a multiple of 2.
	 * For example, step_size = 512 and strength = 8 give
	 * DIV_ROUND_UP(8 * fls(4096), 16) * 2 = 7 * 2 = 14 ECC bytes.
	 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};

static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and everything else
	 * that follows from it.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, the register is left at 0, so set it to 1 here.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
static int denali_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int ret;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
				   mtd->oobsize - denali->oob_skip_bytes);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ?
		  0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		return ret;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw.  Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf)
		return -ENOMEM;

	return 0;
}

static void denali_detach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	kfree(denali->buf);
}

static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.detach_chip = denali_detach_chip,
};
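/*
 * denali_init() below is called by the platform glue (denali_dt.c or
 * denali_pci.c) once it has filled in denali->dev, ->irq, ->reg, ->host,
 * the clock rates, and any platform caps.  A minimal sketch of such a
 * caller (the details here are assumptions for illustration, not a copy of
 * either glue driver):
 *
 *	denali->dev = &pdev->dev;
 *	denali->irq = platform_get_irq(pdev, 0);
 *	denali->reg = devm_ioremap_resource(&pdev->dev, res_reg);
 *	denali->host = devm_ioremap_resource(&pdev->dev, res_mem);
 *	ret = denali_init(denali);
 */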
int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if DT did not give a "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_rate && denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	chip->dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(mtd, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);

void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);

	nand_release(mtd);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);