// SPDX-License-Identifier: GPL-2.0
/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * Copyright (c) 2017-2019 Socionext Inc.
 *   Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

#define DENALI_NAND_NAME	"denali-nand"

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
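
/*
 * Note on the encoding above (as used with the ->host_read/->host_write
 * hooks throughout this file): bits [27:26] of a host address select the
 * MAP mode, bits [25:24] the bank (see DENALI_BANK), and the low bits
 * carry the page number (MAP01), a high-level command argument (MAP10),
 * or the access cycle type (MAP11).
 */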

static struct denali_chip *to_denali_chip(struct nand_chip *chip)
{
	return container_of(chip, struct denali_chip, chip);
}

static struct denali_controller *to_denali_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct denali_controller,
			    controller);
}

/*
 * Direct Addressing - the slave address forms the control information
 * (command type, bank, block, and page address). The slave data is the
 * actual data to be transferred. This mode requires a 28-bit address
 * region to be allocated.
 */
static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_controller *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - an address translation module intervenes in passing
 * the control information. This mode reduces the required address range.
 * The control information and transferred data are latched by the registers
 * in the translation module.
 */
static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_controller *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}

static void denali_enable_irq(struct denali_controller *denali)
{
	int i;

	for (i = 0; i < denali->nbanks; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_controller *denali)
{
	int i;

	for (i = 0; i < denali->nbanks; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_controller *denali,
			     int bank, u32 irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_controller *denali)
{
	int i;

	for (i = 0; i < denali->nbanks; i++)
		denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_controller *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	u32 irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < denali->nbanks; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}

static void denali_reset_irq(struct denali_controller *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
{
	unsigned long time_left, flags;
	u32 irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
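
/*
 * A typical use of the IRQ helpers above, as seen throughout this driver:
 *
 *	denali_reset_irq(denali);
 *	... kick the controller operation ...
 *	irq_status = denali_wait_for_irq(denali, INTR__SOME_COMP_FLAG);
 *	if (!(irq_status & INTR__SOME_COMP_FLAG))
 *		return -EIO;
 *
 * (INTR__SOME_COMP_FLAG is only a placeholder for whichever completion bit
 * the kicked operation is expected to raise.)
 */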

static void denali_select_target(struct nand_chip *chip, int cs)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
	struct mtd_info *mtd = nand_to_mtd(chip);

	denali->active_bank = sel->bank;

	iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ?
		  0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);

	if (chip->options & NAND_KEEP_TIMINGS)
		return;

	/* update timing registers unless NAND_KEEP_TIMINGS is set */
	iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
	iowrite32(sel->tcwaw_and_addr_2_data,
		  denali->reg + TCWAW_AND_ADDR_2_DATA);
	iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
	iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
	iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
	iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
	iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
	iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
}

static int denali_change_column(struct nand_chip *chip, unsigned int offset,
				void *buf, unsigned int len, bool write)
{
	if (write)
		return nand_change_write_column_op(chip, offset, buf, len,
						   false);
	else
		return nand_change_read_column_op(chip, offset, buf, len,
						  false);
}

static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int writesize = mtd->writesize;
	int oob_skip = denali->oob_skip_bytes;
	int ret, i, pos, len;

	for (i = 0; i < ecc->steps; i++) {
		pos = i * (ecc->size + ecc->bytes);
		len = ecc->size;

		if (pos >= writesize) {
			pos += oob_skip;
		} else if (pos + len > writesize) {
			/* This chunk overlaps the BBM area. It must be split. */
			ret = denali_change_column(chip, pos, buf,
						   writesize - pos, write);
			if (ret)
				return ret;

			buf += writesize - pos;
			len -= writesize - pos;
			pos = writesize + oob_skip;
		}

		ret = denali_change_column(chip, pos, buf, len, write);
		if (ret)
			return ret;

		buf += len;
	}

	return 0;
}
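
/*
 * Sketch of the physical page layout assumed by denali_payload_xfer() above
 * and denali_oob_xfer() below: chunks of (ecc->size data + ecc->bytes ECC)
 * are packed back to back, except that the first oob_skip_bytes at offset
 * mtd->writesize are reserved for the bad block marker (BBM):
 *
 *	| data 0 | ECC 0 | data 1 | ... | BBM | ...chunk... | OOB free |
 *	0                            writesize
 *
 * A chunk crossing the writesize boundary must therefore be split around
 * the BBM area.
 */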

static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int oob_skip = denali->oob_skip_bytes;
	int ret, i, pos, len;

	/* BBM at the beginning of the OOB area */
	ret = denali_change_column(chip, writesize, buf, oob_skip, write);
	if (ret)
		return ret;

	buf += oob_skip;

	for (i = 0; i < ecc->steps; i++) {
		pos = ecc->size + i * (ecc->size + ecc->bytes);

		if (i == ecc->steps - 1)
			/* The last chunk includes OOB free */
			len = writesize + oobsize - pos - oob_skip;
		else
			len = ecc->bytes;

		if (pos >= writesize) {
			pos += oob_skip;
		} else if (pos + len > writesize) {
			/* This chunk overlaps the BBM area. It must be split. */
			ret = denali_change_column(chip, pos, buf,
						   writesize - pos, write);
			if (ret)
				return ret;

			buf += writesize - pos;
			len -= writesize - pos;
			pos = writesize + oob_skip;
		}

		ret = denali_change_column(chip, pos, buf, len, write);
		if (ret)
			return ret;

		buf += len;
	}

	return 0;
}

static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
			   int page)
{
	int ret;

	if (!buf && !oob_buf)
		return -EINVAL;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	if (buf) {
		ret = denali_payload_xfer(chip, buf, false);
		if (ret)
			return ret;
	}

	if (oob_buf) {
		ret = denali_oob_xfer(chip, oob_buf, false);
		if (ret)
			return ret;
	}

	return 0;
}

static int denali_write_raw(struct nand_chip *chip, const void *buf,
			    const void *oob_buf, int page)
{
	int ret;

	if (!buf && !oob_buf)
		return -EINVAL;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	if (buf) {
		ret = denali_payload_xfer(chip, (void *)buf, true);
		if (ret)
			return ret;
	}

	if (oob_buf) {
		ret = denali_oob_xfer(chip, (void *)oob_buf, true);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
				int oob_required, int page)
{
	return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
			       page);
}

static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
				 int oob_required, int page)
{
	return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
				page);
}

static int denali_read_oob(struct nand_chip *chip, int page)
{
	return denali_read_raw(chip, NULL, chip->oob_poi, page);
}

static int denali_write_oob(struct nand_chip *chip, int page)
{
	return denali_write_raw(chip, NULL, chip->oob_poi, page);
}
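
/*
 * An erased page reads as all 0xff in both the data and the ECC area, which
 * is not a valid codeword, so the ECC engine flags it as uncorrectable.
 * nand_check_erased_ecc_chunk() re-examines such chunks and reports them as
 * clean (or as containing a few bitflips) instead of as a true ECC failure.
 */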

static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int i, stat;

	for (i = 0; i < ecc->steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
						   ecc->bytes, NULL, 0,
						   ecc->strength);
		if (stat < 0) {
			ecc_stats->failed++;
		} else {
			ecc_stats->corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc->size;
		ecc_code += ecc->bytes;
	}

	return max_bitflips;
}

static int denali_hw_ecc_fixup(struct nand_chip *chip,
			       unsigned long *uncor_ecc_flags)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	int bank = denali->active_bank;
	u32 ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector. We cannot know how many sectors, or
		 * which sector(s), so the erased-page check must be run on
		 * all of them.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of the per-sector corrected
	 * bitflips, which is suitable for the return value of the
	 * ->read_page() callback. Unfortunately, the total number of
	 * corrected bits in the page is unknown, so increase the stats
	 * by max_bitflips as a compromise.
	 */
	ecc_stats->corrected += max_bitflips;

	return max_bitflips;
}

static int denali_sw_ecc_fixup(struct nand_chip *chip,
			       unsigned long *uncor_ecc_flags, u8 *buf)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	unsigned int ecc_size = chip->ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	u32 err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	u8 err_cor_value;
	unsigned int prev_sector = 0;
	u32 irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is equal to or greater than ecc_size,
			 * the error happened in the OOB area; no correction
			 * is needed there, so ignore it. err_device tells
			 * which NAND device the error bits belong to when
			 * more than one NAND is connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
				 denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			ecc_stats->corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors have been handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
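
/*
 * A worked example for the buffer offset computed in denali_sw_ecc_fixup()
 * above, assuming ecc_size = 512 and devs_per_cs = 2 (two x8 devices): an
 * error reported at sector 1, byte 3, device 1 lives at
 * buf[(1 * 512 + 3) * 2 + 1] = buf[1031], because the bytes of the two
 * devices are interleaved in the logical page.
 */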

static void denali_setup_dma64(struct denali_controller *denali,
			       dma_addr_t dma_addr, int page, bool write)
{
	u32 mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three-step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) |
			   (write ? BIT(8) : 0) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}

static void denali_setup_dma32(struct denali_controller *denali,
			       dma_addr_t dma_addr, int page, bool write)
{
	u32 mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four-step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write ? BIT(8) : 0) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
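
/*
 * Which of the two ->setup_dma hooks above is used is decided in
 * denali_init() below: denali_setup_dma64 when the platform advertises
 * DENALI_CAP_DMA_64BIT, denali_setup_dma32 otherwise.
 */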

static int denali_pio_read(struct denali_controller *denali, u32 *buf,
			   size_t size, int page)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	u32 irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		buf[i] = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
			    size_t size, int page)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	u32 irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, buf[i]);

	irq_status = denali_wait_for_irq(denali,
					 INTR__PROGRAM_COMP |
					 INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_controller *denali, void *buf,
			   size_t size, int page, bool write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page);
	else
		return denali_pio_read(denali, buf, size, page);
}

static int denali_dma_xfer(struct denali_controller *denali, void *buf,
			   size_t size, int page, bool write)
{
	dma_addr_t dma_addr;
	u32 irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is
		 * asserted when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface. Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
			    int page, bool write)
{
	struct denali_controller *denali = to_denali_controller(chip);

	denali_select_target(chip, chip->cur_cs);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, write);
	else
		return denali_pio_xfer(denali, buf, size, page, write);
}

static int denali_read_page(struct nand_chip *chip, u8 *buf,
			    int oob_required, int page)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}

static int denali_write_page(struct nand_chip *chip, const u8 *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
}

static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct denali_chip_sel *sel;
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	u32 tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;
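
	/*
	 * Example (for illustration only): clk_x_rate = 200 MHz gives
	 * t_x = 5000 ps, so a chip with tREA_max = 16 ns would need
	 * ACC_CLKS = DIV_ROUND_UP(16000, 5000) = 4 cycles in the
	 * computation below.
	 */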

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core
	 * clock. clk_x is an integral multiple N of the core clock, where N
	 * is configured at IP delivery time; its possible values are 4, 5,
	 * and 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	sel = &to_denali_chip(chip)->sels[chipnr];

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	sel->acc_clks = tmp;

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	sel->re_2_we = tmp;

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	sel->re_2_re = tmp;

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	sel->hwhr2_and_we_2_re = tmp;

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bits wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	sel->tcwaw_and_addr_2_data = tmp;

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	sel->rdwr_en_hi_cnt = tmp;

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	sel->rdwr_en_lo_cnt = tmp;

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	sel->cs_setup_cnt = tmp;

	return 0;
}
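
/*
 * A worked example of the formula below: for step_size = 512 and
 * strength = 8, the BCH code protects 512 * 8 = 4096 bits, so each
 * corrected bit costs fls(4096) = 13 parity bits: 8 * 13 = 104 bits,
 * rounded up to an even number of bytes -> DIV_ROUND_UP(104, 16) * 2 = 14.
 */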

int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct denali_controller *denali = to_denali_controller(chip);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct denali_controller *denali = to_denali_controller(chip);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
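
/*
 * Example of the resulting OOB layout, assuming oob_skip_bytes = 8,
 * chip->ecc.total = 112 (8 steps * 14 bytes) and mtd->oobsize = 224:
 * bytes [0, 8) hold the BBM, [8, 120) the ECC codes, and [120, 224)
 * remain free for the user.
 */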

static int denali_multidev_fixup(struct nand_chip *chip)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	/*
	 * Support for multi-device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything else
	 * necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left at 0. Set it to 1 in that
	 * case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	memorg->pagesize <<= 1;
	memorg->oobsize <<= 1;
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
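
/*
 * Example: two x8 chips with 2 KiB pages and 64-byte OOB behind one CS are
 * presented to the NAND core as a single chip with 4 KiB pages, 128-byte
 * OOB, and doubled ECC step size, strength, and bytes.
 */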

static int denali_attach_chip(struct nand_chip *chip)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
				   mtd->oobsize - denali->oob_skip_bytes);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	ret = denali_multidev_fixup(chip);
	if (ret)
		return ret;

	return 0;
}

static void denali_exec_in8(struct denali_controller *denali, u32 type,
			    u8 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
}

static void denali_exec_in16(struct denali_controller *denali, u32 type,
			     u8 *buf, unsigned int len)
{
	u32 data;
	int i;

	for (i = 0; i < len; i += 2) {
		data = denali->host_read(denali, type | DENALI_BANK(denali));
		/* bits 31:24 and 15:8 are used for DDR */
		buf[i] = data;
		buf[i + 1] = data >> 16;
	}
}

static void denali_exec_in(struct denali_controller *denali, u32 type,
			   u8 *buf, unsigned int len, bool width16)
{
	if (width16)
		denali_exec_in16(denali, type, buf, len);
	else
		denali_exec_in8(denali, type, buf, len);
}

static void denali_exec_out8(struct denali_controller *denali, u32 type,
			     const u8 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
}

static void denali_exec_out16(struct denali_controller *denali, u32 type,
			      const u8 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < len; i += 2)
		denali->host_write(denali, type | DENALI_BANK(denali),
				   buf[i + 1] << 16 | buf[i]);
}

static void denali_exec_out(struct denali_controller *denali, u32 type,
			    const u8 *buf, unsigned int len, bool width16)
{
	if (width16)
		denali_exec_out16(denali, type, buf, len);
	else
		denali_exec_out8(denali, type, buf, len);
}

static int denali_exec_waitrdy(struct denali_controller *denali)
{
	u32 irq_stat;

	/* R/B# pin transitioned from low to high? */
	irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);

	/* in case the nand_operation has multiple NAND_OP_WAITRDY_INSTR */
	denali_reset_irq(denali);

	return irq_stat & INTR__INT_ACT ? 0 : -EIO;
}

static int denali_exec_instr(struct nand_chip *chip,
			     const struct nand_op_instr *instr)
{
	struct denali_controller *denali = to_denali_controller(chip);

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		denali_exec_out8(denali, DENALI_MAP11_CMD,
				 &instr->ctx.cmd.opcode, 1);
		return 0;
	case NAND_OP_ADDR_INSTR:
		denali_exec_out8(denali, DENALI_MAP11_ADDR,
				 instr->ctx.addr.addrs,
				 instr->ctx.addr.naddrs);
		return 0;
	case NAND_OP_DATA_IN_INSTR:
		denali_exec_in(denali, DENALI_MAP11_DATA,
			       instr->ctx.data.buf.in,
			       instr->ctx.data.len,
			       !instr->ctx.data.force_8bit &&
			       chip->options & NAND_BUSWIDTH_16);
		return 0;
	case NAND_OP_DATA_OUT_INSTR:
		denali_exec_out(denali, DENALI_MAP11_DATA,
				instr->ctx.data.buf.out,
				instr->ctx.data.len,
				!instr->ctx.data.force_8bit &&
				chip->options & NAND_BUSWIDTH_16);
		return 0;
	case NAND_OP_WAITRDY_INSTR:
		return denali_exec_waitrdy(denali);
	default:
		WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
			  instr->type);

		return -EINVAL;
	}
}

static int denali_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	int i, ret;

	if (check_only)
		return 0;

	denali_select_target(chip, op->cs);

	/*
	 * Some commands contain NAND_OP_WAITRDY_INSTR.
	 * The IRQ status must be cleared here to catch the R/B# interrupt
	 * there.
	 */
	denali_reset_irq(to_denali_controller(chip));

	for (i = 0; i < op->ninstrs; i++) {
		ret = denali_exec_instr(chip, &op->instrs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
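
/*
 * The ->exec_op() path above is what the NAND core uses for generic
 * operations. For example, a nand_reset_op() arrives here as a
 * NAND_OP_CMD_INSTR followed by a NAND_OP_WAITRDY_INSTR, which become a
 * MAP11 command cycle and a wait on INTR__INT_ACT, respectively.
 */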

static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.exec_op = denali_exec_op,
	.setup_data_interface = denali_setup_data_interface,
};

int denali_chip_init(struct denali_controller *denali,
		     struct denali_chip *dchip)
{
	struct nand_chip *chip = &dchip->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_chip *dchip2;
	int i, j, ret;

	chip->controller = &denali->controller;

	/* sanity checks for bank numbers */
	for (i = 0; i < dchip->nsels; i++) {
		unsigned int bank = dchip->sels[i].bank;

		if (bank >= denali->nbanks) {
			dev_err(denali->dev, "unsupported bank %d\n", bank);
			return -EINVAL;
		}

		for (j = 0; j < i; j++) {
			if (bank == dchip->sels[j].bank) {
				dev_err(denali->dev,
					"bank %d is assigned twice in the same chip\n",
					bank);
				return -EINVAL;
			}
		}

		list_for_each_entry(dchip2, &denali->chips, node) {
			for (j = 0; j < dchip2->nsels; j++) {
				if (bank == dchip2->sels[j].bank) {
					dev_err(denali->dev,
						"bank %d is already used\n",
						bank);
					return -EINVAL;
				}
			}
		}
	}

	mtd->dev.parent = denali->dev;

	/*
	 * Fall back to the default name only for the first chip if DT did
	 * not provide a "label" property; any additional chip needs a
	 * "label" property of its own.
	 */
	if (!mtd->name && list_empty(&denali->chips))
		mtd->name = "denali-nand";

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
	}

	/* clk rate info is needed for setup_data_interface */
	if (!denali->clk_rate || !denali->clk_x_rate)
		chip->options |= NAND_KEEP_TIMINGS;

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->options |= NAND_NO_SUBPAGE_WRITE;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->ecc.read_page = denali_read_page;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	ret = nand_scan(chip, dchip->nsels);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	list_add_tail(&dchip->node, &denali->chips);

	return 0;

cleanup_nand:
	nand_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL_GPL(denali_chip_init);
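
/*
 * denali_init() below is called once per controller instance by the
 * platform glue (e.g. denali_dt.c or denali_pci.c), which then registers
 * each connected chip with denali_chip_init() above.
 */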

int denali_init(struct denali_controller *denali)
{
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	nand_controller_init(&denali->controller);
	denali->controller.ops = &denali_controller_ops;
	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);
	INIT_LIST_HEAD(&denali->chips);
	denali->active_bank = DENALI_INVALID_BANK;

	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->nbanks <<= 1;

	if (features & FEATURES__DMA)
		denali->dma_avail = true;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = false;
		}
	}

	if (denali->dma_avail) {
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/*
	 * Set how many bytes should be skipped before writing data in the
	 * OOB area. If the platform requests a non-zero value, write it to
	 * the register. Otherwise, read the value out, expecting it has
	 * already been set up by firmware.
	 */
	if (denali->oob_skip_bytes)
		iowrite32(denali->oob_skip_bytes,
			  denali->reg + SPARE_AREA_SKIP_BYTES);
	else
		denali->oob_skip_bytes = ioread32(denali->reg +
						  SPARE_AREA_SKIP_BYTES);

	iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
	iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
	iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);

	return 0;
}
EXPORT_SYMBOL(denali_init);

void denali_remove(struct denali_controller *denali)
{
	struct denali_chip *dchip;

	list_for_each_entry(dchip, &denali->chips, node)
		nand_release(&dchip->chip);

	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);

MODULE_DESCRIPTION("Driver core for Denali NAND controller");
MODULE_AUTHOR("Intel Corporation and its suppliers");
MODULE_LICENSE("GPL v2");