1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NAND Flash Controller Device Driver 4 * Copyright © 2009-2010, Intel Corporation and its suppliers. 5 * 6 * Copyright (c) 2017-2019 Socionext Inc. 7 * Reworked by Masahiro Yamada <yamada.masahiro@socionext.com> 8 */ 9 10 #include <linux/bitfield.h> 11 #include <linux/completion.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/interrupt.h> 14 #include <linux/io.h> 15 #include <linux/module.h> 16 #include <linux/mtd/mtd.h> 17 #include <linux/mtd/rawnand.h> 18 #include <linux/slab.h> 19 #include <linux/spinlock.h> 20 21 #include "denali.h" 22 23 #define DENALI_NAND_NAME "denali-nand" 24 25 /* for Indexed Addressing */ 26 #define DENALI_INDEXED_CTRL 0x00 27 #define DENALI_INDEXED_DATA 0x10 28 29 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ 30 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ 31 #define DENALI_MAP10 (2 << 26) /* high-level control plane */ 32 #define DENALI_MAP11 (3 << 26) /* direct controller access */ 33 34 /* MAP11 access cycle type */ 35 #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ 36 #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ 37 #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ 38 39 #define DENALI_BANK(denali) ((denali)->active_bank << 24) 40 41 #define DENALI_INVALID_BANK -1 42 43 static struct denali_chip *to_denali_chip(struct nand_chip *chip) 44 { 45 return container_of(chip, struct denali_chip, chip); 46 } 47 48 static struct denali_controller *to_denali_controller(struct nand_chip *chip) 49 { 50 return container_of(chip->controller, struct denali_controller, 51 controller); 52 } 53 54 /* 55 * Direct Addressing - the slave address forms the control information (command 56 * type, bank, block, and page address). The slave data is the actual data to 57 * be transferred. This mode requires 28 bits of address region allocated. 58 */ 59 static u32 denali_direct_read(struct denali_controller *denali, u32 addr) 60 { 61 return ioread32(denali->host + addr); 62 } 63 64 static void denali_direct_write(struct denali_controller *denali, u32 addr, 65 u32 data) 66 { 67 iowrite32(data, denali->host + addr); 68 } 69 70 /* 71 * Indexed Addressing - address translation module intervenes in passing the 72 * control information. This mode reduces the required address range. The 73 * control information and transferred data are latched by the registers in 74 * the translation module. 75 */ 76 static u32 denali_indexed_read(struct denali_controller *denali, u32 addr) 77 { 78 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 79 return ioread32(denali->host + DENALI_INDEXED_DATA); 80 } 81 82 static void denali_indexed_write(struct denali_controller *denali, u32 addr, 83 u32 data) 84 { 85 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 86 iowrite32(data, denali->host + DENALI_INDEXED_DATA); 87 } 88 89 static void denali_enable_irq(struct denali_controller *denali) 90 { 91 int i; 92 93 for (i = 0; i < denali->nbanks; i++) 94 iowrite32(U32_MAX, denali->reg + INTR_EN(i)); 95 iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); 96 } 97 98 static void denali_disable_irq(struct denali_controller *denali) 99 { 100 int i; 101 102 for (i = 0; i < denali->nbanks; i++) 103 iowrite32(0, denali->reg + INTR_EN(i)); 104 iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); 105 } 106 107 static void denali_clear_irq(struct denali_controller *denali, 108 int bank, u32 irq_status) 109 { 110 /* write one to clear bits */ 111 iowrite32(irq_status, denali->reg + INTR_STATUS(bank)); 112 } 113 114 static void denali_clear_irq_all(struct denali_controller *denali) 115 { 116 int i; 117 118 for (i = 0; i < denali->nbanks; i++) 119 denali_clear_irq(denali, i, U32_MAX); 120 } 121 122 static irqreturn_t denali_isr(int irq, void *dev_id) 123 { 124 struct denali_controller *denali = dev_id; 125 irqreturn_t ret = IRQ_NONE; 126 u32 irq_status; 127 int i; 128 129 spin_lock(&denali->irq_lock); 130 131 for (i = 0; i < denali->nbanks; i++) { 132 irq_status = ioread32(denali->reg + INTR_STATUS(i)); 133 if (irq_status) 134 ret = IRQ_HANDLED; 135 136 denali_clear_irq(denali, i, irq_status); 137 138 if (i != denali->active_bank) 139 continue; 140 141 denali->irq_status |= irq_status; 142 143 if (denali->irq_status & denali->irq_mask) 144 complete(&denali->complete); 145 } 146 147 spin_unlock(&denali->irq_lock); 148 149 return ret; 150 } 151 152 static void denali_reset_irq(struct denali_controller *denali) 153 { 154 unsigned long flags; 155 156 spin_lock_irqsave(&denali->irq_lock, flags); 157 denali->irq_status = 0; 158 denali->irq_mask = 0; 159 spin_unlock_irqrestore(&denali->irq_lock, flags); 160 } 161 162 static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask) 163 { 164 unsigned long time_left, flags; 165 u32 irq_status; 166 167 spin_lock_irqsave(&denali->irq_lock, flags); 168 169 irq_status = denali->irq_status; 170 171 if (irq_mask & irq_status) { 172 /* return immediately if the IRQ has already happened. */ 173 spin_unlock_irqrestore(&denali->irq_lock, flags); 174 return irq_status; 175 } 176 177 denali->irq_mask = irq_mask; 178 reinit_completion(&denali->complete); 179 spin_unlock_irqrestore(&denali->irq_lock, flags); 180 181 time_left = wait_for_completion_timeout(&denali->complete, 182 msecs_to_jiffies(1000)); 183 if (!time_left) { 184 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", 185 irq_mask); 186 return 0; 187 } 188 189 return denali->irq_status; 190 } 191 192 static void denali_select_target(struct nand_chip *chip, int cs) 193 { 194 struct denali_controller *denali = to_denali_controller(chip); 195 struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs]; 196 struct mtd_info *mtd = nand_to_mtd(chip); 197 198 denali->active_bank = sel->bank; 199 200 iowrite32(1 << (chip->phys_erase_shift - chip->page_shift), 201 denali->reg + PAGES_PER_BLOCK); 202 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, 203 denali->reg + DEVICE_WIDTH); 204 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); 205 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); 206 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 207 0 : TWO_ROW_ADDR_CYCLES__FLAG, 208 denali->reg + TWO_ROW_ADDR_CYCLES); 209 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | 210 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), 211 denali->reg + ECC_CORRECTION); 212 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); 213 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); 214 iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS); 215 216 if (chip->options & NAND_KEEP_TIMINGS) 217 return; 218 219 /* update timing registers unless NAND_KEEP_TIMINGS is set */ 220 iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE); 221 iowrite32(sel->tcwaw_and_addr_2_data, 222 denali->reg + TCWAW_AND_ADDR_2_DATA); 223 iowrite32(sel->re_2_we, denali->reg + RE_2_WE); 224 iowrite32(sel->acc_clks, denali->reg + ACC_CLKS); 225 iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT); 226 iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT); 227 iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT); 228 iowrite32(sel->re_2_re, denali->reg + RE_2_RE); 229 } 230 231 static int denali_change_column(struct nand_chip *chip, unsigned int offset, 232 void *buf, unsigned int len, bool write) 233 { 234 if (write) 235 return nand_change_write_column_op(chip, offset, buf, len, 236 false); 237 else 238 return nand_change_read_column_op(chip, offset, buf, len, 239 false); 240 } 241 242 static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write) 243 { 244 struct denali_controller *denali = to_denali_controller(chip); 245 struct mtd_info *mtd = nand_to_mtd(chip); 246 struct nand_ecc_ctrl *ecc = &chip->ecc; 247 int writesize = mtd->writesize; 248 int oob_skip = denali->oob_skip_bytes; 249 int ret, i, pos, len; 250 251 for (i = 0; i < ecc->steps; i++) { 252 pos = i * (ecc->size + ecc->bytes); 253 len = ecc->size; 254 255 if (pos >= writesize) { 256 pos += oob_skip; 257 } else if (pos + len > writesize) { 258 /* This chunk overwraps the BBM area. Must be split */ 259 ret = denali_change_column(chip, pos, buf, 260 writesize - pos, write); 261 if (ret) 262 return ret; 263 264 buf += writesize - pos; 265 len -= writesize - pos; 266 pos = writesize + oob_skip; 267 } 268 269 ret = denali_change_column(chip, pos, buf, len, write); 270 if (ret) 271 return ret; 272 273 buf += len; 274 } 275 276 return 0; 277 } 278 279 static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write) 280 { 281 struct denali_controller *denali = to_denali_controller(chip); 282 struct mtd_info *mtd = nand_to_mtd(chip); 283 struct nand_ecc_ctrl *ecc = &chip->ecc; 284 int writesize = mtd->writesize; 285 int oobsize = mtd->oobsize; 286 int oob_skip = denali->oob_skip_bytes; 287 int ret, i, pos, len; 288 289 /* BBM at the beginning of the OOB area */ 290 ret = denali_change_column(chip, writesize, buf, oob_skip, write); 291 if (ret) 292 return ret; 293 294 buf += oob_skip; 295 296 for (i = 0; i < ecc->steps; i++) { 297 pos = ecc->size + i * (ecc->size + ecc->bytes); 298 299 if (i == ecc->steps - 1) 300 /* The last chunk includes OOB free */ 301 len = writesize + oobsize - pos - oob_skip; 302 else 303 len = ecc->bytes; 304 305 if (pos >= writesize) { 306 pos += oob_skip; 307 } else if (pos + len > writesize) { 308 /* This chunk overwraps the BBM area. Must be split */ 309 ret = denali_change_column(chip, pos, buf, 310 writesize - pos, write); 311 if (ret) 312 return ret; 313 314 buf += writesize - pos; 315 len -= writesize - pos; 316 pos = writesize + oob_skip; 317 } 318 319 ret = denali_change_column(chip, pos, buf, len, write); 320 if (ret) 321 return ret; 322 323 buf += len; 324 } 325 326 return 0; 327 } 328 329 static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf, 330 int page) 331 { 332 int ret; 333 334 if (!buf && !oob_buf) 335 return -EINVAL; 336 337 ret = nand_read_page_op(chip, page, 0, NULL, 0); 338 if (ret) 339 return ret; 340 341 if (buf) { 342 ret = denali_payload_xfer(chip, buf, false); 343 if (ret) 344 return ret; 345 } 346 347 if (oob_buf) { 348 ret = denali_oob_xfer(chip, oob_buf, false); 349 if (ret) 350 return ret; 351 } 352 353 return 0; 354 } 355 356 static int denali_write_raw(struct nand_chip *chip, const void *buf, 357 const void *oob_buf, int page) 358 { 359 int ret; 360 361 if (!buf && !oob_buf) 362 return -EINVAL; 363 364 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 365 if (ret) 366 return ret; 367 368 if (buf) { 369 ret = denali_payload_xfer(chip, (void *)buf, true); 370 if (ret) 371 return ret; 372 } 373 374 if (oob_buf) { 375 ret = denali_oob_xfer(chip, (void *)oob_buf, true); 376 if (ret) 377 return ret; 378 } 379 380 return nand_prog_page_end_op(chip); 381 } 382 383 static int denali_read_page_raw(struct nand_chip *chip, u8 *buf, 384 int oob_required, int page) 385 { 386 return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL, 387 page); 388 } 389 390 static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf, 391 int oob_required, int page) 392 { 393 return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL, 394 page); 395 } 396 397 static int denali_read_oob(struct nand_chip *chip, int page) 398 { 399 return denali_read_raw(chip, NULL, chip->oob_poi, page); 400 } 401 402 static int denali_write_oob(struct nand_chip *chip, int page) 403 { 404 return denali_write_raw(chip, NULL, chip->oob_poi, page); 405 } 406 407 static int denali_check_erased_page(struct nand_chip *chip, u8 *buf, 408 unsigned long uncor_ecc_flags, 409 unsigned int max_bitflips) 410 { 411 struct denali_controller *denali = to_denali_controller(chip); 412 struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats; 413 struct nand_ecc_ctrl *ecc = &chip->ecc; 414 u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes; 415 int i, stat; 416 417 for (i = 0; i < ecc->steps; i++) { 418 if (!(uncor_ecc_flags & BIT(i))) 419 continue; 420 421 stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code, 422 ecc->bytes, NULL, 0, 423 ecc->strength); 424 if (stat < 0) { 425 ecc_stats->failed++; 426 } else { 427 ecc_stats->corrected += stat; 428 max_bitflips = max_t(unsigned int, max_bitflips, stat); 429 } 430 431 buf += ecc->size; 432 ecc_code += ecc->bytes; 433 } 434 435 return max_bitflips; 436 } 437 438 static int denali_hw_ecc_fixup(struct nand_chip *chip, 439 unsigned long *uncor_ecc_flags) 440 { 441 struct denali_controller *denali = to_denali_controller(chip); 442 struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats; 443 int bank = denali->active_bank; 444 u32 ecc_cor; 445 unsigned int max_bitflips; 446 447 ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank)); 448 ecc_cor >>= ECC_COR_INFO__SHIFT(bank); 449 450 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { 451 /* 452 * This flag is set when uncorrectable error occurs at least in 453 * one ECC sector. We can not know "how many sectors", or 454 * "which sector(s)". We need erase-page check for all sectors. 455 */ 456 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0); 457 return 0; 458 } 459 460 max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); 461 462 /* 463 * The register holds the maximum of per-sector corrected bitflips. 464 * This is suitable for the return value of the ->read_page() callback. 465 * Unfortunately, we can not know the total number of corrected bits in 466 * the page. Increase the stats by max_bitflips. (compromised solution) 467 */ 468 ecc_stats->corrected += max_bitflips; 469 470 return max_bitflips; 471 } 472 473 static int denali_sw_ecc_fixup(struct nand_chip *chip, 474 unsigned long *uncor_ecc_flags, u8 *buf) 475 { 476 struct denali_controller *denali = to_denali_controller(chip); 477 struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats; 478 unsigned int ecc_size = chip->ecc.size; 479 unsigned int bitflips = 0; 480 unsigned int max_bitflips = 0; 481 u32 err_addr, err_cor_info; 482 unsigned int err_byte, err_sector, err_device; 483 u8 err_cor_value; 484 unsigned int prev_sector = 0; 485 u32 irq_status; 486 487 denali_reset_irq(denali); 488 489 do { 490 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); 491 err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); 492 err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); 493 494 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); 495 err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, 496 err_cor_info); 497 err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, 498 err_cor_info); 499 500 /* reset the bitflip counter when crossing ECC sector */ 501 if (err_sector != prev_sector) 502 bitflips = 0; 503 504 if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { 505 /* 506 * Check later if this is a real ECC error, or 507 * an erased sector. 508 */ 509 *uncor_ecc_flags |= BIT(err_sector); 510 } else if (err_byte < ecc_size) { 511 /* 512 * If err_byte is larger than ecc_size, means error 513 * happened in OOB, so we ignore it. It's no need for 514 * us to correct it err_device is represented the NAND 515 * error bits are happened in if there are more than 516 * one NAND connected. 517 */ 518 int offset; 519 unsigned int flips_in_byte; 520 521 offset = (err_sector * ecc_size + err_byte) * 522 denali->devs_per_cs + err_device; 523 524 /* correct the ECC error */ 525 flips_in_byte = hweight8(buf[offset] ^ err_cor_value); 526 buf[offset] ^= err_cor_value; 527 ecc_stats->corrected += flips_in_byte; 528 bitflips += flips_in_byte; 529 530 max_bitflips = max(max_bitflips, bitflips); 531 } 532 533 prev_sector = err_sector; 534 } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); 535 536 /* 537 * Once handle all ECC errors, controller will trigger an 538 * ECC_TRANSACTION_DONE interrupt. 539 */ 540 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); 541 if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) 542 return -EIO; 543 544 return max_bitflips; 545 } 546 547 static void denali_setup_dma64(struct denali_controller *denali, 548 dma_addr_t dma_addr, int page, bool write) 549 { 550 u32 mode; 551 const int page_count = 1; 552 553 mode = DENALI_MAP10 | DENALI_BANK(denali) | page; 554 555 /* DMA is a three step process */ 556 557 /* 558 * 1. setup transfer type, interrupt when complete, 559 * burst len = 64 bytes, the number of pages 560 */ 561 denali->host_write(denali, mode, 562 0x01002000 | (64 << 16) | 563 (write ? BIT(8) : 0) | page_count); 564 565 /* 2. set memory low address */ 566 denali->host_write(denali, mode, lower_32_bits(dma_addr)); 567 568 /* 3. set memory high address */ 569 denali->host_write(denali, mode, upper_32_bits(dma_addr)); 570 } 571 572 static void denali_setup_dma32(struct denali_controller *denali, 573 dma_addr_t dma_addr, int page, bool write) 574 { 575 u32 mode; 576 const int page_count = 1; 577 578 mode = DENALI_MAP10 | DENALI_BANK(denali); 579 580 /* DMA is a four step process */ 581 582 /* 1. setup transfer type and # of pages */ 583 denali->host_write(denali, mode | page, 584 0x2000 | (write ? BIT(8) : 0) | page_count); 585 586 /* 2. set memory high address bits 23:8 */ 587 denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 588 589 /* 3. set memory low address bits 23:8 */ 590 denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 591 592 /* 4. interrupt when complete, burst len = 64 bytes */ 593 denali->host_write(denali, mode | 0x14000, 0x2400); 594 } 595 596 static int denali_pio_read(struct denali_controller *denali, u32 *buf, 597 size_t size, int page) 598 { 599 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 600 u32 irq_status, ecc_err_mask; 601 int i; 602 603 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 604 ecc_err_mask = INTR__ECC_UNCOR_ERR; 605 else 606 ecc_err_mask = INTR__ECC_ERR; 607 608 denali_reset_irq(denali); 609 610 for (i = 0; i < size / 4; i++) 611 buf[i] = denali->host_read(denali, addr); 612 613 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); 614 if (!(irq_status & INTR__PAGE_XFER_INC)) 615 return -EIO; 616 617 if (irq_status & INTR__ERASED_PAGE) 618 memset(buf, 0xff, size); 619 620 return irq_status & ecc_err_mask ? -EBADMSG : 0; 621 } 622 623 static int denali_pio_write(struct denali_controller *denali, const u32 *buf, 624 size_t size, int page) 625 { 626 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 627 u32 irq_status; 628 int i; 629 630 denali_reset_irq(denali); 631 632 for (i = 0; i < size / 4; i++) 633 denali->host_write(denali, addr, buf[i]); 634 635 irq_status = denali_wait_for_irq(denali, 636 INTR__PROGRAM_COMP | 637 INTR__PROGRAM_FAIL); 638 if (!(irq_status & INTR__PROGRAM_COMP)) 639 return -EIO; 640 641 return 0; 642 } 643 644 static int denali_pio_xfer(struct denali_controller *denali, void *buf, 645 size_t size, int page, bool write) 646 { 647 if (write) 648 return denali_pio_write(denali, buf, size, page); 649 else 650 return denali_pio_read(denali, buf, size, page); 651 } 652 653 static int denali_dma_xfer(struct denali_controller *denali, void *buf, 654 size_t size, int page, bool write) 655 { 656 dma_addr_t dma_addr; 657 u32 irq_mask, irq_status, ecc_err_mask; 658 enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 659 int ret = 0; 660 661 dma_addr = dma_map_single(denali->dev, buf, size, dir); 662 if (dma_mapping_error(denali->dev, dma_addr)) { 663 dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); 664 return denali_pio_xfer(denali, buf, size, page, write); 665 } 666 667 if (write) { 668 /* 669 * INTR__PROGRAM_COMP is never asserted for the DMA transfer. 670 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted 671 * when the page program is completed. 672 */ 673 irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; 674 ecc_err_mask = 0; 675 } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) { 676 irq_mask = INTR__DMA_CMD_COMP; 677 ecc_err_mask = INTR__ECC_UNCOR_ERR; 678 } else { 679 irq_mask = INTR__DMA_CMD_COMP; 680 ecc_err_mask = INTR__ECC_ERR; 681 } 682 683 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 684 /* 685 * The ->setup_dma() hook kicks DMA by using the data/command 686 * interface, which belongs to a different AXI port from the 687 * register interface. Read back the register to avoid a race. 688 */ 689 ioread32(denali->reg + DMA_ENABLE); 690 691 denali_reset_irq(denali); 692 denali->setup_dma(denali, dma_addr, page, write); 693 694 irq_status = denali_wait_for_irq(denali, irq_mask); 695 if (!(irq_status & INTR__DMA_CMD_COMP)) 696 ret = -EIO; 697 else if (irq_status & ecc_err_mask) 698 ret = -EBADMSG; 699 700 iowrite32(0, denali->reg + DMA_ENABLE); 701 702 dma_unmap_single(denali->dev, dma_addr, size, dir); 703 704 if (irq_status & INTR__ERASED_PAGE) 705 memset(buf, 0xff, size); 706 707 return ret; 708 } 709 710 static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size, 711 int page, bool write) 712 { 713 struct denali_controller *denali = to_denali_controller(chip); 714 715 denali_select_target(chip, chip->cur_cs); 716 717 if (denali->dma_avail) 718 return denali_dma_xfer(denali, buf, size, page, write); 719 else 720 return denali_pio_xfer(denali, buf, size, page, write); 721 } 722 723 static int denali_read_page(struct nand_chip *chip, u8 *buf, 724 int oob_required, int page) 725 { 726 struct denali_controller *denali = to_denali_controller(chip); 727 struct mtd_info *mtd = nand_to_mtd(chip); 728 unsigned long uncor_ecc_flags = 0; 729 int stat = 0; 730 int ret; 731 732 ret = denali_page_xfer(chip, buf, mtd->writesize, page, false); 733 if (ret && ret != -EBADMSG) 734 return ret; 735 736 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 737 stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags); 738 else if (ret == -EBADMSG) 739 stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf); 740 741 if (stat < 0) 742 return stat; 743 744 if (uncor_ecc_flags) { 745 ret = denali_read_oob(chip, page); 746 if (ret) 747 return ret; 748 749 stat = denali_check_erased_page(chip, buf, 750 uncor_ecc_flags, stat); 751 } 752 753 return stat; 754 } 755 756 static int denali_write_page(struct nand_chip *chip, const u8 *buf, 757 int oob_required, int page) 758 { 759 struct mtd_info *mtd = nand_to_mtd(chip); 760 761 return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true); 762 } 763 764 static int denali_setup_interface(struct nand_chip *chip, int chipnr, 765 const struct nand_interface_config *conf) 766 { 767 static const unsigned int data_setup_on_host = 10000; 768 struct denali_controller *denali = to_denali_controller(chip); 769 struct denali_chip_sel *sel; 770 const struct nand_sdr_timings *timings; 771 unsigned long t_x, mult_x; 772 int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; 773 int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; 774 int addr_2_data_mask; 775 u32 tmp; 776 777 timings = nand_get_sdr_timings(conf); 778 if (IS_ERR(timings)) 779 return PTR_ERR(timings); 780 781 /* clk_x period in picoseconds */ 782 t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); 783 if (!t_x) 784 return -EINVAL; 785 786 /* 787 * The bus interface clock, clk_x, is phase aligned with the core clock. 788 * The clk_x is an integral multiple N of the core clk. The value N is 789 * configured at IP delivery time, and its available value is 4, 5, 6. 790 */ 791 mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate); 792 if (mult_x < 4 || mult_x > 6) 793 return -EINVAL; 794 795 if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) 796 return 0; 797 798 sel = &to_denali_chip(chip)->sels[chipnr]; 799 800 /* tRWH -> RE_2_WE */ 801 re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x); 802 re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); 803 804 tmp = ioread32(denali->reg + RE_2_WE); 805 tmp &= ~RE_2_WE__VALUE; 806 tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); 807 sel->re_2_we = tmp; 808 809 /* tRHZ -> RE_2_RE */ 810 re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x); 811 re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); 812 813 tmp = ioread32(denali->reg + RE_2_RE); 814 tmp &= ~RE_2_RE__VALUE; 815 tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); 816 sel->re_2_re = tmp; 817 818 /* 819 * tCCS, tWHR -> WE_2_RE 820 * 821 * With WE_2_RE properly set, the Denali controller automatically takes 822 * care of the delay; the driver need not set NAND_WAIT_TCCS. 823 */ 824 we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x); 825 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); 826 827 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); 828 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; 829 tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); 830 sel->hwhr2_and_we_2_re = tmp; 831 832 /* tADL -> ADDR_2_DATA */ 833 834 /* for older versions, ADDR_2_DATA is only 6 bit wide */ 835 addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 836 if (denali->revision < 0x0501) 837 addr_2_data_mask >>= 1; 838 839 addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x); 840 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); 841 842 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); 843 tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 844 tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); 845 sel->tcwaw_and_addr_2_data = tmp; 846 847 /* tREH, tWH -> RDWR_EN_HI_CNT */ 848 rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), 849 t_x); 850 rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); 851 852 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); 853 tmp &= ~RDWR_EN_HI_CNT__VALUE; 854 tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); 855 sel->rdwr_en_hi_cnt = tmp; 856 857 /* 858 * tREA -> ACC_CLKS 859 * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT 860 */ 861 862 /* 863 * Determine the minimum of acc_clks to meet the setup timing when 864 * capturing the incoming data. 865 * 866 * The delay on the chip side is well-defined as tREA, but we need to 867 * take additional delay into account. This includes a certain degree 868 * of unknowledge, such as signal propagation delays on the PCB and 869 * in the SoC, load capacity of the I/O pins, etc. 870 */ 871 acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x); 872 873 /* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */ 874 rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x); 875 876 /* Extend rdwr_en_lo to meet the data hold timing */ 877 rdwr_en_lo = max_t(int, rdwr_en_lo, 878 acc_clks - timings->tRHOH_min / t_x); 879 880 /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */ 881 rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), 882 t_x); 883 rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); 884 rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); 885 886 /* Center the data latch timing for extra safety */ 887 acc_clks = (acc_clks + rdwr_en_lo + 888 DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2; 889 acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); 890 891 tmp = ioread32(denali->reg + ACC_CLKS); 892 tmp &= ~ACC_CLKS__VALUE; 893 tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); 894 sel->acc_clks = tmp; 895 896 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); 897 tmp &= ~RDWR_EN_LO_CNT__VALUE; 898 tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); 899 sel->rdwr_en_lo_cnt = tmp; 900 901 /* tCS, tCEA -> CS_SETUP_CNT */ 902 cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo, 903 (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks, 904 0); 905 cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); 906 907 tmp = ioread32(denali->reg + CS_SETUP_CNT); 908 tmp &= ~CS_SETUP_CNT__VALUE; 909 tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); 910 sel->cs_setup_cnt = tmp; 911 912 return 0; 913 } 914 915 int denali_calc_ecc_bytes(int step_size, int strength) 916 { 917 /* BCH code. Denali requires ecc.bytes to be multiple of 2 */ 918 return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2; 919 } 920 EXPORT_SYMBOL(denali_calc_ecc_bytes); 921 922 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, 923 struct mtd_oob_region *oobregion) 924 { 925 struct nand_chip *chip = mtd_to_nand(mtd); 926 struct denali_controller *denali = to_denali_controller(chip); 927 928 if (section > 0) 929 return -ERANGE; 930 931 oobregion->offset = denali->oob_skip_bytes; 932 oobregion->length = chip->ecc.total; 933 934 return 0; 935 } 936 937 static int denali_ooblayout_free(struct mtd_info *mtd, int section, 938 struct mtd_oob_region *oobregion) 939 { 940 struct nand_chip *chip = mtd_to_nand(mtd); 941 struct denali_controller *denali = to_denali_controller(chip); 942 943 if (section > 0) 944 return -ERANGE; 945 946 oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; 947 oobregion->length = mtd->oobsize - oobregion->offset; 948 949 return 0; 950 } 951 952 static const struct mtd_ooblayout_ops denali_ooblayout_ops = { 953 .ecc = denali_ooblayout_ecc, 954 .free = denali_ooblayout_free, 955 }; 956 957 static int denali_multidev_fixup(struct nand_chip *chip) 958 { 959 struct denali_controller *denali = to_denali_controller(chip); 960 struct mtd_info *mtd = nand_to_mtd(chip); 961 struct nand_memory_organization *memorg; 962 963 memorg = nanddev_get_memorg(&chip->base); 964 965 /* 966 * Support for multi device: 967 * When the IP configuration is x16 capable and two x8 chips are 968 * connected in parallel, DEVICES_CONNECTED should be set to 2. 969 * In this case, the core framework knows nothing about this fact, 970 * so we should tell it the _logical_ pagesize and anything necessary. 971 */ 972 denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); 973 974 /* 975 * On some SoCs, DEVICES_CONNECTED is not auto-detected. 976 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. 977 */ 978 if (denali->devs_per_cs == 0) { 979 denali->devs_per_cs = 1; 980 iowrite32(1, denali->reg + DEVICES_CONNECTED); 981 } 982 983 if (denali->devs_per_cs == 1) 984 return 0; 985 986 if (denali->devs_per_cs != 2) { 987 dev_err(denali->dev, "unsupported number of devices %d\n", 988 denali->devs_per_cs); 989 return -EINVAL; 990 } 991 992 /* 2 chips in parallel */ 993 memorg->pagesize <<= 1; 994 memorg->oobsize <<= 1; 995 mtd->size <<= 1; 996 mtd->erasesize <<= 1; 997 mtd->writesize <<= 1; 998 mtd->oobsize <<= 1; 999 chip->page_shift += 1; 1000 chip->phys_erase_shift += 1; 1001 chip->bbt_erase_shift += 1; 1002 chip->chip_shift += 1; 1003 chip->pagemask <<= 1; 1004 chip->ecc.size <<= 1; 1005 chip->ecc.bytes <<= 1; 1006 chip->ecc.strength <<= 1; 1007 denali->oob_skip_bytes <<= 1; 1008 1009 return 0; 1010 } 1011 1012 static int denali_attach_chip(struct nand_chip *chip) 1013 { 1014 struct denali_controller *denali = to_denali_controller(chip); 1015 struct mtd_info *mtd = nand_to_mtd(chip); 1016 int ret; 1017 1018 ret = nand_ecc_choose_conf(chip, denali->ecc_caps, 1019 mtd->oobsize - denali->oob_skip_bytes); 1020 if (ret) { 1021 dev_err(denali->dev, "Failed to setup ECC settings.\n"); 1022 return ret; 1023 } 1024 1025 dev_dbg(denali->dev, 1026 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 1027 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 1028 1029 ret = denali_multidev_fixup(chip); 1030 if (ret) 1031 return ret; 1032 1033 return 0; 1034 } 1035 1036 static void denali_exec_in8(struct denali_controller *denali, u32 type, 1037 u8 *buf, unsigned int len) 1038 { 1039 int i; 1040 1041 for (i = 0; i < len; i++) 1042 buf[i] = denali->host_read(denali, type | DENALI_BANK(denali)); 1043 } 1044 1045 static void denali_exec_in16(struct denali_controller *denali, u32 type, 1046 u8 *buf, unsigned int len) 1047 { 1048 u32 data; 1049 int i; 1050 1051 for (i = 0; i < len; i += 2) { 1052 data = denali->host_read(denali, type | DENALI_BANK(denali)); 1053 /* bit 31:24 and 15:8 are used for DDR */ 1054 buf[i] = data; 1055 buf[i + 1] = data >> 16; 1056 } 1057 } 1058 1059 static void denali_exec_in(struct denali_controller *denali, u32 type, 1060 u8 *buf, unsigned int len, bool width16) 1061 { 1062 if (width16) 1063 denali_exec_in16(denali, type, buf, len); 1064 else 1065 denali_exec_in8(denali, type, buf, len); 1066 } 1067 1068 static void denali_exec_out8(struct denali_controller *denali, u32 type, 1069 const u8 *buf, unsigned int len) 1070 { 1071 int i; 1072 1073 for (i = 0; i < len; i++) 1074 denali->host_write(denali, type | DENALI_BANK(denali), buf[i]); 1075 } 1076 1077 static void denali_exec_out16(struct denali_controller *denali, u32 type, 1078 const u8 *buf, unsigned int len) 1079 { 1080 int i; 1081 1082 for (i = 0; i < len; i += 2) 1083 denali->host_write(denali, type | DENALI_BANK(denali), 1084 buf[i + 1] << 16 | buf[i]); 1085 } 1086 1087 static void denali_exec_out(struct denali_controller *denali, u32 type, 1088 const u8 *buf, unsigned int len, bool width16) 1089 { 1090 if (width16) 1091 denali_exec_out16(denali, type, buf, len); 1092 else 1093 denali_exec_out8(denali, type, buf, len); 1094 } 1095 1096 static int denali_exec_waitrdy(struct denali_controller *denali) 1097 { 1098 u32 irq_stat; 1099 1100 /* R/B# pin transitioned from low to high? */ 1101 irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT); 1102 1103 /* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */ 1104 denali_reset_irq(denali); 1105 1106 return irq_stat & INTR__INT_ACT ? 0 : -EIO; 1107 } 1108 1109 static int denali_exec_instr(struct nand_chip *chip, 1110 const struct nand_op_instr *instr) 1111 { 1112 struct denali_controller *denali = to_denali_controller(chip); 1113 1114 switch (instr->type) { 1115 case NAND_OP_CMD_INSTR: 1116 denali_exec_out8(denali, DENALI_MAP11_CMD, 1117 &instr->ctx.cmd.opcode, 1); 1118 return 0; 1119 case NAND_OP_ADDR_INSTR: 1120 denali_exec_out8(denali, DENALI_MAP11_ADDR, 1121 instr->ctx.addr.addrs, 1122 instr->ctx.addr.naddrs); 1123 return 0; 1124 case NAND_OP_DATA_IN_INSTR: 1125 denali_exec_in(denali, DENALI_MAP11_DATA, 1126 instr->ctx.data.buf.in, 1127 instr->ctx.data.len, 1128 !instr->ctx.data.force_8bit && 1129 chip->options & NAND_BUSWIDTH_16); 1130 return 0; 1131 case NAND_OP_DATA_OUT_INSTR: 1132 denali_exec_out(denali, DENALI_MAP11_DATA, 1133 instr->ctx.data.buf.out, 1134 instr->ctx.data.len, 1135 !instr->ctx.data.force_8bit && 1136 chip->options & NAND_BUSWIDTH_16); 1137 return 0; 1138 case NAND_OP_WAITRDY_INSTR: 1139 return denali_exec_waitrdy(denali); 1140 default: 1141 WARN_ONCE(1, "unsupported NAND instruction type: %d\n", 1142 instr->type); 1143 1144 return -EINVAL; 1145 } 1146 } 1147 1148 static int denali_exec_op(struct nand_chip *chip, 1149 const struct nand_operation *op, bool check_only) 1150 { 1151 int i, ret; 1152 1153 if (check_only) 1154 return 0; 1155 1156 denali_select_target(chip, op->cs); 1157 1158 /* 1159 * Some commands contain NAND_OP_WAITRDY_INSTR. 1160 * irq must be cleared here to catch the R/B# interrupt there. 1161 */ 1162 denali_reset_irq(to_denali_controller(chip)); 1163 1164 for (i = 0; i < op->ninstrs; i++) { 1165 ret = denali_exec_instr(chip, &op->instrs[i]); 1166 if (ret) 1167 return ret; 1168 } 1169 1170 return 0; 1171 } 1172 1173 static const struct nand_controller_ops denali_controller_ops = { 1174 .attach_chip = denali_attach_chip, 1175 .exec_op = denali_exec_op, 1176 .setup_interface = denali_setup_interface, 1177 }; 1178 1179 int denali_chip_init(struct denali_controller *denali, 1180 struct denali_chip *dchip) 1181 { 1182 struct nand_chip *chip = &dchip->chip; 1183 struct mtd_info *mtd = nand_to_mtd(chip); 1184 struct denali_chip *dchip2; 1185 int i, j, ret; 1186 1187 chip->controller = &denali->controller; 1188 1189 /* sanity checks for bank numbers */ 1190 for (i = 0; i < dchip->nsels; i++) { 1191 unsigned int bank = dchip->sels[i].bank; 1192 1193 if (bank >= denali->nbanks) { 1194 dev_err(denali->dev, "unsupported bank %d\n", bank); 1195 return -EINVAL; 1196 } 1197 1198 for (j = 0; j < i; j++) { 1199 if (bank == dchip->sels[j].bank) { 1200 dev_err(denali->dev, 1201 "bank %d is assigned twice in the same chip\n", 1202 bank); 1203 return -EINVAL; 1204 } 1205 } 1206 1207 list_for_each_entry(dchip2, &denali->chips, node) { 1208 for (j = 0; j < dchip2->nsels; j++) { 1209 if (bank == dchip2->sels[j].bank) { 1210 dev_err(denali->dev, 1211 "bank %d is already used\n", 1212 bank); 1213 return -EINVAL; 1214 } 1215 } 1216 } 1217 } 1218 1219 mtd->dev.parent = denali->dev; 1220 1221 /* 1222 * Fallback to the default name if DT did not give "label" property. 1223 * Use "label" property if multiple chips are connected. 1224 */ 1225 if (!mtd->name && list_empty(&denali->chips)) 1226 mtd->name = "denali-nand"; 1227 1228 if (denali->dma_avail) { 1229 chip->options |= NAND_USES_DMA; 1230 chip->buf_align = 16; 1231 } 1232 1233 /* clk rate info is needed for setup_interface */ 1234 if (!denali->clk_rate || !denali->clk_x_rate) 1235 chip->options |= NAND_KEEP_TIMINGS; 1236 1237 chip->bbt_options |= NAND_BBT_USE_FLASH; 1238 chip->bbt_options |= NAND_BBT_NO_OOB; 1239 chip->options |= NAND_NO_SUBPAGE_WRITE; 1240 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 1241 chip->ecc.read_page = denali_read_page; 1242 chip->ecc.write_page = denali_write_page; 1243 chip->ecc.read_page_raw = denali_read_page_raw; 1244 chip->ecc.write_page_raw = denali_write_page_raw; 1245 chip->ecc.read_oob = denali_read_oob; 1246 chip->ecc.write_oob = denali_write_oob; 1247 1248 mtd_set_ooblayout(mtd, &denali_ooblayout_ops); 1249 1250 ret = nand_scan(chip, dchip->nsels); 1251 if (ret) 1252 return ret; 1253 1254 ret = mtd_device_register(mtd, NULL, 0); 1255 if (ret) { 1256 dev_err(denali->dev, "Failed to register MTD: %d\n", ret); 1257 goto cleanup_nand; 1258 } 1259 1260 list_add_tail(&dchip->node, &denali->chips); 1261 1262 return 0; 1263 1264 cleanup_nand: 1265 nand_cleanup(chip); 1266 1267 return ret; 1268 } 1269 EXPORT_SYMBOL_GPL(denali_chip_init); 1270 1271 int denali_init(struct denali_controller *denali) 1272 { 1273 u32 features = ioread32(denali->reg + FEATURES); 1274 int ret; 1275 1276 nand_controller_init(&denali->controller); 1277 denali->controller.ops = &denali_controller_ops; 1278 init_completion(&denali->complete); 1279 spin_lock_init(&denali->irq_lock); 1280 INIT_LIST_HEAD(&denali->chips); 1281 denali->active_bank = DENALI_INVALID_BANK; 1282 1283 /* 1284 * The REVISION register may not be reliable. Platforms are allowed to 1285 * override it. 1286 */ 1287 if (!denali->revision) 1288 denali->revision = swab16(ioread32(denali->reg + REVISION)); 1289 1290 denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features); 1291 1292 /* the encoding changed from rev 5.0 to 5.1 */ 1293 if (denali->revision < 0x0501) 1294 denali->nbanks <<= 1; 1295 1296 if (features & FEATURES__DMA) 1297 denali->dma_avail = true; 1298 1299 if (denali->dma_avail) { 1300 int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32; 1301 1302 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit)); 1303 if (ret) { 1304 dev_info(denali->dev, 1305 "Failed to set DMA mask. Disabling DMA.\n"); 1306 denali->dma_avail = false; 1307 } 1308 } 1309 1310 if (denali->dma_avail) { 1311 if (denali->caps & DENALI_CAP_DMA_64BIT) 1312 denali->setup_dma = denali_setup_dma64; 1313 else 1314 denali->setup_dma = denali_setup_dma32; 1315 } 1316 1317 if (features & FEATURES__INDEX_ADDR) { 1318 denali->host_read = denali_indexed_read; 1319 denali->host_write = denali_indexed_write; 1320 } else { 1321 denali->host_read = denali_direct_read; 1322 denali->host_write = denali_direct_write; 1323 } 1324 1325 /* 1326 * Set how many bytes should be skipped before writing data in OOB. 1327 * If a platform requests a non-zero value, set it to the register. 1328 * Otherwise, read the value out, expecting it has already been set up 1329 * by firmware. 1330 */ 1331 if (denali->oob_skip_bytes) 1332 iowrite32(denali->oob_skip_bytes, 1333 denali->reg + SPARE_AREA_SKIP_BYTES); 1334 else 1335 denali->oob_skip_bytes = ioread32(denali->reg + 1336 SPARE_AREA_SKIP_BYTES); 1337 1338 iowrite32(0, denali->reg + TRANSFER_SPARE_REG); 1339 iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED); 1340 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); 1341 iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); 1342 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); 1343 iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT); 1344 1345 denali_clear_irq_all(denali); 1346 1347 ret = devm_request_irq(denali->dev, denali->irq, denali_isr, 1348 IRQF_SHARED, DENALI_NAND_NAME, denali); 1349 if (ret) { 1350 dev_err(denali->dev, "Unable to request IRQ\n"); 1351 return ret; 1352 } 1353 1354 denali_enable_irq(denali); 1355 1356 return 0; 1357 } 1358 EXPORT_SYMBOL(denali_init); 1359 1360 void denali_remove(struct denali_controller *denali) 1361 { 1362 struct denali_chip *dchip, *tmp; 1363 struct nand_chip *chip; 1364 int ret; 1365 1366 list_for_each_entry_safe(dchip, tmp, &denali->chips, node) { 1367 chip = &dchip->chip; 1368 ret = mtd_device_unregister(nand_to_mtd(chip)); 1369 WARN_ON(ret); 1370 nand_cleanup(chip); 1371 list_del(&dchip->node); 1372 } 1373 1374 denali_disable_irq(denali); 1375 } 1376 EXPORT_SYMBOL(denali_remove); 1377 1378 MODULE_DESCRIPTION("Driver core for Denali NAND controller"); 1379 MODULE_AUTHOR("Intel Corporation and its suppliers"); 1380 MODULE_LICENSE("GPL v2"); 1381