// SPDX-License-Identifier: GPL-2.0
/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * Copyright (c) 2017 Socionext Inc.
 *   Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

#define DENALI_NAND_NAME		"denali-nand"
#define DENALI_DEFAULT_OOB_SKIP_BYTES	8

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

/* MAP10 commands */
#define DENALI_ERASE		0x01

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * Direct Addressing - the slave address forms the control information
 * (command type, bank, block, and page address). The slave data is the
 * actual data to be transferred. This mode requires a 28-bit address
 * region to be allocated.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - an address translation module intervenes in passing
 * the control information. This mode reduces the required address range.
 * The control information and transferred data are latched by the registers
 * in the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
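
/*
 * Example (illustrative): with direct addressing, issuing a command byte
 * on the active bank is a single write:
 *
 *   denali->host_write(denali, DENALI_MAP11_CMD | DENALI_BANK(denali), cmd);
 *
 * i.e. iowrite32(cmd, host + ((3 << 26) | (bank << 24))). Indexed
 * addressing performs the same access as a CTRL/DATA register pair.
 */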

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}

static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}
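
/*
 * Most event-driven operations in this driver follow the same pattern
 * (a sketch; INTR__FOO stands for whichever completion bit applies):
 *
 *   denali_reset_irq(denali);
 *   ... kick the hardware (host_write, DMA setup, etc.) ...
 *   irq_status = denali_wait_for_irq(denali, INTR__FOO | INTR__FOO_FAIL);
 *   if (!(irq_status & INTR__FOO))
 *           return -EIO;
 *
 * denali_reset_irq() must come first so that stale irq_status bits from
 * a previous operation are not mistaken for the new completion event.
 */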

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}

static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	unsigned long flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);
	irq_status = denali->irq_status;
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	return irq_status;
}

static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = denali->host_read(denali, addr);
}

static void denali_write_buf(struct nand_chip *chip, const uint8_t *buf,
			     int len)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, addr, buf[i]);
}

static void denali_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		buf16[i] = denali->host_read(denali, addr);
}

static void denali_write_buf16(struct nand_chip *chip, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		denali->host_write(denali, addr, buf16[i]);
}

static uint8_t denali_read_byte(struct nand_chip *chip)
{
	uint8_t byte;

	denali_read_buf(chip, &byte, 1);

	return byte;
}

static void denali_write_byte(struct nand_chip *chip, uint8_t byte)
{
	denali_write_buf(chip, &byte, 1);
}

static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->legacy.dev_ready or
	 * chip->legacy.waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}

static int denali_dev_ready(struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));

	return !!(denali_check_irq(denali) & INTR__INT_ACT);
}

static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}
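
/*
 * Two ECC fixup paths follow. Controllers with DENALI_CAP_HW_ECC_FIXUP
 * correct bitflips in hardware and only report per-page statistics
 * (denali_hw_ecc_fixup); the others report each error location and
 * leave the correction to software (denali_sw_ecc_fixup).
 * denali_read_page() picks the appropriate one.
 */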

static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector. We cannot know how many sectors, or
		 * which sector(s). We need the erased-page check for all
		 * sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we cannot know the total number of corrected bits in
	 * the page, so increase the stats by max_bitflips as a compromise.
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}

static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is not smaller than ecc_size, the error
			 * happened in the OOB, so we ignore it; there is no
			 * need to correct it. err_device identifies which of
			 * the NAND devices connected in parallel holds the
			 * error bits.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors are handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
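
/*
 * Two flavors of the MAP10 DMA setup sequence follow; denali->setup_dma
 * is pointed at one of them in denali_attach_chip(), depending on
 * whether the IP was configured for 64-bit DMA (DENALI_CAP_DMA_64BIT).
 */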

static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three-step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}

static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four-step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}

static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	const uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, *buf32++);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page, raw);
	else
		return denali_pio_read(denali, buf, size, page, raw);
}
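
/*
 * DMA path. If the buffer cannot be DMA-mapped, denali_dma_xfer() quietly
 * falls back to the PIO helpers above, so callers only ever go through
 * denali_data_xfer().
 */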

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface. Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
		  denali->reg + TRANSFER_SPARE_REG);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}
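
/*
 * Physical page layout (illustrative): the ECC engine interleaves
 * ecc_bytes of ECC after every ecc_size bytes of payload, running
 * through the main area and continuing into the spare area. The first
 * oob_skip_bytes of the spare area hold the bad block marker and are
 * skipped by that stream; whatever follows the last ECC chunk is the
 * free OOB:
 *
 *   |       main area (writesize)      |    spare area (oobsize)      |
 *   | data0 | ecc0 | data1 | ecc1 |... | BBM | ...last ecc | OOB free |
 *
 * A chunk that straddles the main/spare boundary is split around the
 * BBM region. The helpers below shuffle bytes between this physical
 * layout and the "payload + OOB" view used by the MTD layer.
 */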

static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}

static int denali_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}

static int denali_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}

static int denali_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	return nand_prog_page_end_op(chip);
}

static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
			    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
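
/*
 * denali_write_page_raw() is the inverse of denali_read_page_raw(): it
 * re-packs the "payload + OOB" view into the physical page layout
 * sketched above denali_oob_xfer() before pushing the whole
 * writesize + oobsize image out in one transfer.
 */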

static int denali_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first, except for a full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}

static int denali_write_page(struct nand_chip *chip, const uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
				page, 0, 1);
}

static void denali_select_chip(struct nand_chip *chip, int cs)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));

	denali->active_bank = cs;
}

static int denali_waitfunc(struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	uint32_t irq_status;

	/* R/B# pin transitioned from low to high? */
	irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

	return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct nand_chip *chip, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	uint32_t irq_status;

	denali_reset_irq(denali);

	denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
			   DENALI_ERASE);

	/* wait for erase to complete or failure to occur */
	irq_status = denali_wait_for_irq(denali,
					 INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
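
/*
 * Illustrative numbers only (assuming clk_x = 200 MHz): t_x is then
 * 10^12 / 200000000 = 5000 ps, so a tREA_max of 20000 ps maps to
 * acc_clks = DIV_ROUND_UP(20000, 5000) = 4 bus clock cycles. Every
 * timing register below is derived from the SDR timings the same way:
 * convert to clk_x cycles, then clamp to the register field width.
 */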

static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core
	 * clock. clk_x is an integer multiple N of the core clock. The
	 * value N is configured at IP delivery time, and its possible
	 * values are 4, 5, and 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}

static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Set how many bytes should be skipped before writing data in OOB.
	 * If a non-zero value has already been set (by firmware or something),
	 * just use it. Otherwise, set the driver default.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	if (!denali->oob_skip_bytes) {
		denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
		iowrite32(denali->oob_skip_bytes,
			  denali->reg + SPARE_AREA_SKIP_BYTES);
	}

	denali_detect_max_banks(denali);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}
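
/*
 * Worked example: for step_size = 512 and strength = 8, each BCH symbol
 * needs fls(512 * 8) = fls(4096) = 13 bits, so the ECC footprint is
 * DIV_ROUND_UP(8 * 13, 16) * 2 = 7 * 2 = 14 bytes per 512-byte step.
 */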
int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
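
/*
 * For example, two x8 chips with 2KiB pages wired in parallel on one
 * chip select appear as a single x16 device with a 4KiB logical page;
 * every size, shift, and ECC parameter below is doubled accordingly.
 */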
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi-device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and everything
	 * necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left at 0. Set it to 1 in that case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}

static int denali_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int ret;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
				   mtd->oobsize - denali->oob_skip_bytes);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ?
		  0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->legacy.read_buf = denali_read_buf16;
		chip->legacy.write_buf = denali_write_buf16;
	} else {
		chip->legacy.read_buf = denali_read_buf;
		chip->legacy.write_buf = denali_write_buf;
	}
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->legacy.erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		return ret;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf)
		return -ENOMEM;

	return 0;
}

static void denali_detach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	kfree(denali->buf);
}

static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.detach_chip = denali_detach_chip,
};

int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);
	if (!denali->max_banks) {
		/* Error out early if no chips were found. */
		ret = -ENODEV;
		goto disable_irq;
	}

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if the DT gave no "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->legacy.read_byte = denali_read_byte;
	chip->legacy.write_byte = denali_write_byte;
	chip->legacy.cmd_ctrl = denali_cmd_ctrl;
	chip->legacy.dev_ready = denali_dev_ready;
	chip->legacy.waitfunc = denali_waitfunc;

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_rate && denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	chip->dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(chip, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);

void denali_remove(struct denali_nand_info *denali)
{
	nand_release(&denali->nand);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);

MODULE_DESCRIPTION("Driver core for Denali NAND controller");
MODULE_AUTHOR("Intel Corporation and its suppliers");
MODULE_LICENSE("GPL v2");