1 /* 2 * NAND Flash Controller Device Driver 3 * Copyright © 2009-2010, Intel Corporation and its suppliers. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14 15 #include <linux/bitfield.h> 16 #include <linux/completion.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/interrupt.h> 19 #include <linux/io.h> 20 #include <linux/module.h> 21 #include <linux/mtd/mtd.h> 22 #include <linux/mtd/rawnand.h> 23 #include <linux/slab.h> 24 #include <linux/spinlock.h> 25 26 #include "denali.h" 27 28 MODULE_LICENSE("GPL"); 29 30 #define DENALI_NAND_NAME "denali-nand" 31 32 /* for Indexed Addressing */ 33 #define DENALI_INDEXED_CTRL 0x00 34 #define DENALI_INDEXED_DATA 0x10 35 36 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ 37 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ 38 #define DENALI_MAP10 (2 << 26) /* high-level control plane */ 39 #define DENALI_MAP11 (3 << 26) /* direct controller access */ 40 41 /* MAP11 access cycle type */ 42 #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ 43 #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ 44 #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ 45 46 /* MAP10 commands */ 47 #define DENALI_ERASE 0x01 48 49 #define DENALI_BANK(denali) ((denali)->active_bank << 24) 50 51 #define DENALI_INVALID_BANK -1 52 #define DENALI_NR_BANKS 4 53 54 /* 55 * The bus interface clock, clk_x, is phase aligned with the core clock. The 56 * clk_x is an integral multiple N of the core clk. The value N is configured 57 * at IP delivery time, and its available value is 4, 5, or 6. We need to align 58 * to the largest value to make it work with any possible configuration. 59 */ 60 #define DENALI_CLK_X_MULT 6 61 62 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) 63 { 64 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); 65 } 66 67 /* 68 * Direct Addressing - the slave address forms the control information (command 69 * type, bank, block, and page address). The slave data is the actual data to 70 * be transferred. This mode requires 28 bits of address region allocated. 71 */ 72 static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) 73 { 74 return ioread32(denali->host + addr); 75 } 76 77 static void denali_direct_write(struct denali_nand_info *denali, u32 addr, 78 u32 data) 79 { 80 iowrite32(data, denali->host + addr); 81 } 82 83 /* 84 * Indexed Addressing - address translation module intervenes in passing the 85 * control information. This mode reduces the required address range. The 86 * control information and transferred data are latched by the registers in 87 * the translation module. 88 */ 89 static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) 90 { 91 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 92 return ioread32(denali->host + DENALI_INDEXED_DATA); 93 } 94 95 static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, 96 u32 data) 97 { 98 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 99 iowrite32(data, denali->host + DENALI_INDEXED_DATA); 100 } 101 102 /* 103 * Use the configuration feature register to determine the maximum number of 104 * banks that the hardware supports. 105 */ 106 static void denali_detect_max_banks(struct denali_nand_info *denali) 107 { 108 uint32_t features = ioread32(denali->reg + FEATURES); 109 110 denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); 111 112 /* the encoding changed from rev 5.0 to 5.1 */ 113 if (denali->revision < 0x0501) 114 denali->max_banks <<= 1; 115 } 116 117 static void denali_enable_irq(struct denali_nand_info *denali) 118 { 119 int i; 120 121 for (i = 0; i < DENALI_NR_BANKS; i++) 122 iowrite32(U32_MAX, denali->reg + INTR_EN(i)); 123 iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); 124 } 125 126 static void denali_disable_irq(struct denali_nand_info *denali) 127 { 128 int i; 129 130 for (i = 0; i < DENALI_NR_BANKS; i++) 131 iowrite32(0, denali->reg + INTR_EN(i)); 132 iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); 133 } 134 135 static void denali_clear_irq(struct denali_nand_info *denali, 136 int bank, uint32_t irq_status) 137 { 138 /* write one to clear bits */ 139 iowrite32(irq_status, denali->reg + INTR_STATUS(bank)); 140 } 141 142 static void denali_clear_irq_all(struct denali_nand_info *denali) 143 { 144 int i; 145 146 for (i = 0; i < DENALI_NR_BANKS; i++) 147 denali_clear_irq(denali, i, U32_MAX); 148 } 149 150 static irqreturn_t denali_isr(int irq, void *dev_id) 151 { 152 struct denali_nand_info *denali = dev_id; 153 irqreturn_t ret = IRQ_NONE; 154 uint32_t irq_status; 155 int i; 156 157 spin_lock(&denali->irq_lock); 158 159 for (i = 0; i < DENALI_NR_BANKS; i++) { 160 irq_status = ioread32(denali->reg + INTR_STATUS(i)); 161 if (irq_status) 162 ret = IRQ_HANDLED; 163 164 denali_clear_irq(denali, i, irq_status); 165 166 if (i != denali->active_bank) 167 continue; 168 169 denali->irq_status |= irq_status; 170 171 if (denali->irq_status & denali->irq_mask) 172 complete(&denali->complete); 173 } 174 175 spin_unlock(&denali->irq_lock); 176 177 return ret; 178 } 179 180 static void denali_reset_irq(struct denali_nand_info *denali) 181 { 182 unsigned long flags; 183 184 spin_lock_irqsave(&denali->irq_lock, flags); 185 denali->irq_status = 0; 186 denali->irq_mask = 0; 187 spin_unlock_irqrestore(&denali->irq_lock, flags); 188 } 189 190 static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, 191 uint32_t irq_mask) 192 { 193 unsigned long time_left, flags; 194 uint32_t irq_status; 195 196 spin_lock_irqsave(&denali->irq_lock, flags); 197 198 irq_status = denali->irq_status; 199 200 if (irq_mask & irq_status) { 201 /* return immediately if the IRQ has already happened. */ 202 spin_unlock_irqrestore(&denali->irq_lock, flags); 203 return irq_status; 204 } 205 206 denali->irq_mask = irq_mask; 207 reinit_completion(&denali->complete); 208 spin_unlock_irqrestore(&denali->irq_lock, flags); 209 210 time_left = wait_for_completion_timeout(&denali->complete, 211 msecs_to_jiffies(1000)); 212 if (!time_left) { 213 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", 214 irq_mask); 215 return 0; 216 } 217 218 return denali->irq_status; 219 } 220 221 static uint32_t denali_check_irq(struct denali_nand_info *denali) 222 { 223 unsigned long flags; 224 uint32_t irq_status; 225 226 spin_lock_irqsave(&denali->irq_lock, flags); 227 irq_status = denali->irq_status; 228 spin_unlock_irqrestore(&denali->irq_lock, flags); 229 230 return irq_status; 231 } 232 233 static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 234 { 235 struct denali_nand_info *denali = mtd_to_denali(mtd); 236 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 237 int i; 238 239 for (i = 0; i < len; i++) 240 buf[i] = denali->host_read(denali, addr); 241 } 242 243 static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 244 { 245 struct denali_nand_info *denali = mtd_to_denali(mtd); 246 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 247 int i; 248 249 for (i = 0; i < len; i++) 250 denali->host_write(denali, addr, buf[i]); 251 } 252 253 static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 254 { 255 struct denali_nand_info *denali = mtd_to_denali(mtd); 256 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 257 uint16_t *buf16 = (uint16_t *)buf; 258 int i; 259 260 for (i = 0; i < len / 2; i++) 261 buf16[i] = denali->host_read(denali, addr); 262 } 263 264 static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, 265 int len) 266 { 267 struct denali_nand_info *denali = mtd_to_denali(mtd); 268 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 269 const uint16_t *buf16 = (const uint16_t *)buf; 270 int i; 271 272 for (i = 0; i < len / 2; i++) 273 denali->host_write(denali, addr, buf16[i]); 274 } 275 276 static uint8_t denali_read_byte(struct mtd_info *mtd) 277 { 278 uint8_t byte; 279 280 denali_read_buf(mtd, &byte, 1); 281 282 return byte; 283 } 284 285 static void denali_write_byte(struct mtd_info *mtd, uint8_t byte) 286 { 287 denali_write_buf(mtd, &byte, 1); 288 } 289 290 static uint16_t denali_read_word(struct mtd_info *mtd) 291 { 292 uint16_t word; 293 294 denali_read_buf16(mtd, (uint8_t *)&word, 2); 295 296 return word; 297 } 298 299 static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) 300 { 301 struct denali_nand_info *denali = mtd_to_denali(mtd); 302 uint32_t type; 303 304 if (ctrl & NAND_CLE) 305 type = DENALI_MAP11_CMD; 306 else if (ctrl & NAND_ALE) 307 type = DENALI_MAP11_ADDR; 308 else 309 return; 310 311 /* 312 * Some commands are followed by chip->dev_ready or chip->waitfunc. 313 * irq_status must be cleared here to catch the R/B# interrupt later. 314 */ 315 if (ctrl & NAND_CTRL_CHANGE) 316 denali_reset_irq(denali); 317 318 denali->host_write(denali, DENALI_BANK(denali) | type, dat); 319 } 320 321 static int denali_dev_ready(struct mtd_info *mtd) 322 { 323 struct denali_nand_info *denali = mtd_to_denali(mtd); 324 325 return !!(denali_check_irq(denali) & INTR__INT_ACT); 326 } 327 328 static int denali_check_erased_page(struct mtd_info *mtd, 329 struct nand_chip *chip, uint8_t *buf, 330 unsigned long uncor_ecc_flags, 331 unsigned int max_bitflips) 332 { 333 struct denali_nand_info *denali = mtd_to_denali(mtd); 334 uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes; 335 int ecc_steps = chip->ecc.steps; 336 int ecc_size = chip->ecc.size; 337 int ecc_bytes = chip->ecc.bytes; 338 int i, stat; 339 340 for (i = 0; i < ecc_steps; i++) { 341 if (!(uncor_ecc_flags & BIT(i))) 342 continue; 343 344 stat = nand_check_erased_ecc_chunk(buf, ecc_size, 345 ecc_code, ecc_bytes, 346 NULL, 0, 347 chip->ecc.strength); 348 if (stat < 0) { 349 mtd->ecc_stats.failed++; 350 } else { 351 mtd->ecc_stats.corrected += stat; 352 max_bitflips = max_t(unsigned int, max_bitflips, stat); 353 } 354 355 buf += ecc_size; 356 ecc_code += ecc_bytes; 357 } 358 359 return max_bitflips; 360 } 361 362 static int denali_hw_ecc_fixup(struct mtd_info *mtd, 363 struct denali_nand_info *denali, 364 unsigned long *uncor_ecc_flags) 365 { 366 struct nand_chip *chip = mtd_to_nand(mtd); 367 int bank = denali->active_bank; 368 uint32_t ecc_cor; 369 unsigned int max_bitflips; 370 371 ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank)); 372 ecc_cor >>= ECC_COR_INFO__SHIFT(bank); 373 374 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { 375 /* 376 * This flag is set when uncorrectable error occurs at least in 377 * one ECC sector. We can not know "how many sectors", or 378 * "which sector(s)". We need erase-page check for all sectors. 379 */ 380 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0); 381 return 0; 382 } 383 384 max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); 385 386 /* 387 * The register holds the maximum of per-sector corrected bitflips. 388 * This is suitable for the return value of the ->read_page() callback. 389 * Unfortunately, we can not know the total number of corrected bits in 390 * the page. Increase the stats by max_bitflips. (compromised solution) 391 */ 392 mtd->ecc_stats.corrected += max_bitflips; 393 394 return max_bitflips; 395 } 396 397 static int denali_sw_ecc_fixup(struct mtd_info *mtd, 398 struct denali_nand_info *denali, 399 unsigned long *uncor_ecc_flags, uint8_t *buf) 400 { 401 unsigned int ecc_size = denali->nand.ecc.size; 402 unsigned int bitflips = 0; 403 unsigned int max_bitflips = 0; 404 uint32_t err_addr, err_cor_info; 405 unsigned int err_byte, err_sector, err_device; 406 uint8_t err_cor_value; 407 unsigned int prev_sector = 0; 408 uint32_t irq_status; 409 410 denali_reset_irq(denali); 411 412 do { 413 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); 414 err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); 415 err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); 416 417 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); 418 err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, 419 err_cor_info); 420 err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, 421 err_cor_info); 422 423 /* reset the bitflip counter when crossing ECC sector */ 424 if (err_sector != prev_sector) 425 bitflips = 0; 426 427 if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { 428 /* 429 * Check later if this is a real ECC error, or 430 * an erased sector. 431 */ 432 *uncor_ecc_flags |= BIT(err_sector); 433 } else if (err_byte < ecc_size) { 434 /* 435 * If err_byte is larger than ecc_size, means error 436 * happened in OOB, so we ignore it. It's no need for 437 * us to correct it err_device is represented the NAND 438 * error bits are happened in if there are more than 439 * one NAND connected. 440 */ 441 int offset; 442 unsigned int flips_in_byte; 443 444 offset = (err_sector * ecc_size + err_byte) * 445 denali->devs_per_cs + err_device; 446 447 /* correct the ECC error */ 448 flips_in_byte = hweight8(buf[offset] ^ err_cor_value); 449 buf[offset] ^= err_cor_value; 450 mtd->ecc_stats.corrected += flips_in_byte; 451 bitflips += flips_in_byte; 452 453 max_bitflips = max(max_bitflips, bitflips); 454 } 455 456 prev_sector = err_sector; 457 } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); 458 459 /* 460 * Once handle all ECC errors, controller will trigger an 461 * ECC_TRANSACTION_DONE interrupt. 462 */ 463 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); 464 if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) 465 return -EIO; 466 467 return max_bitflips; 468 } 469 470 static void denali_setup_dma64(struct denali_nand_info *denali, 471 dma_addr_t dma_addr, int page, int write) 472 { 473 uint32_t mode; 474 const int page_count = 1; 475 476 mode = DENALI_MAP10 | DENALI_BANK(denali) | page; 477 478 /* DMA is a three step process */ 479 480 /* 481 * 1. setup transfer type, interrupt when complete, 482 * burst len = 64 bytes, the number of pages 483 */ 484 denali->host_write(denali, mode, 485 0x01002000 | (64 << 16) | (write << 8) | page_count); 486 487 /* 2. set memory low address */ 488 denali->host_write(denali, mode, lower_32_bits(dma_addr)); 489 490 /* 3. set memory high address */ 491 denali->host_write(denali, mode, upper_32_bits(dma_addr)); 492 } 493 494 static void denali_setup_dma32(struct denali_nand_info *denali, 495 dma_addr_t dma_addr, int page, int write) 496 { 497 uint32_t mode; 498 const int page_count = 1; 499 500 mode = DENALI_MAP10 | DENALI_BANK(denali); 501 502 /* DMA is a four step process */ 503 504 /* 1. setup transfer type and # of pages */ 505 denali->host_write(denali, mode | page, 506 0x2000 | (write << 8) | page_count); 507 508 /* 2. set memory high address bits 23:8 */ 509 denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 510 511 /* 3. set memory low address bits 23:8 */ 512 denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 513 514 /* 4. interrupt when complete, burst len = 64 bytes */ 515 denali->host_write(denali, mode | 0x14000, 0x2400); 516 } 517 518 static int denali_pio_read(struct denali_nand_info *denali, void *buf, 519 size_t size, int page, int raw) 520 { 521 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 522 uint32_t *buf32 = (uint32_t *)buf; 523 uint32_t irq_status, ecc_err_mask; 524 int i; 525 526 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 527 ecc_err_mask = INTR__ECC_UNCOR_ERR; 528 else 529 ecc_err_mask = INTR__ECC_ERR; 530 531 denali_reset_irq(denali); 532 533 for (i = 0; i < size / 4; i++) 534 *buf32++ = denali->host_read(denali, addr); 535 536 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); 537 if (!(irq_status & INTR__PAGE_XFER_INC)) 538 return -EIO; 539 540 if (irq_status & INTR__ERASED_PAGE) 541 memset(buf, 0xff, size); 542 543 return irq_status & ecc_err_mask ? -EBADMSG : 0; 544 } 545 546 static int denali_pio_write(struct denali_nand_info *denali, 547 const void *buf, size_t size, int page, int raw) 548 { 549 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 550 const uint32_t *buf32 = (uint32_t *)buf; 551 uint32_t irq_status; 552 int i; 553 554 denali_reset_irq(denali); 555 556 for (i = 0; i < size / 4; i++) 557 denali->host_write(denali, addr, *buf32++); 558 559 irq_status = denali_wait_for_irq(denali, 560 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); 561 if (!(irq_status & INTR__PROGRAM_COMP)) 562 return -EIO; 563 564 return 0; 565 } 566 567 static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, 568 size_t size, int page, int raw, int write) 569 { 570 if (write) 571 return denali_pio_write(denali, buf, size, page, raw); 572 else 573 return denali_pio_read(denali, buf, size, page, raw); 574 } 575 576 static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, 577 size_t size, int page, int raw, int write) 578 { 579 dma_addr_t dma_addr; 580 uint32_t irq_mask, irq_status, ecc_err_mask; 581 enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 582 int ret = 0; 583 584 dma_addr = dma_map_single(denali->dev, buf, size, dir); 585 if (dma_mapping_error(denali->dev, dma_addr)) { 586 dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); 587 return denali_pio_xfer(denali, buf, size, page, raw, write); 588 } 589 590 if (write) { 591 /* 592 * INTR__PROGRAM_COMP is never asserted for the DMA transfer. 593 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted 594 * when the page program is completed. 595 */ 596 irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; 597 ecc_err_mask = 0; 598 } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) { 599 irq_mask = INTR__DMA_CMD_COMP; 600 ecc_err_mask = INTR__ECC_UNCOR_ERR; 601 } else { 602 irq_mask = INTR__DMA_CMD_COMP; 603 ecc_err_mask = INTR__ECC_ERR; 604 } 605 606 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 607 608 denali_reset_irq(denali); 609 denali->setup_dma(denali, dma_addr, page, write); 610 611 irq_status = denali_wait_for_irq(denali, irq_mask); 612 if (!(irq_status & INTR__DMA_CMD_COMP)) 613 ret = -EIO; 614 else if (irq_status & ecc_err_mask) 615 ret = -EBADMSG; 616 617 iowrite32(0, denali->reg + DMA_ENABLE); 618 619 dma_unmap_single(denali->dev, dma_addr, size, dir); 620 621 if (irq_status & INTR__ERASED_PAGE) 622 memset(buf, 0xff, size); 623 624 return ret; 625 } 626 627 static int denali_data_xfer(struct denali_nand_info *denali, void *buf, 628 size_t size, int page, int raw, int write) 629 { 630 iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); 631 iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, 632 denali->reg + TRANSFER_SPARE_REG); 633 634 if (denali->dma_avail) 635 return denali_dma_xfer(denali, buf, size, page, raw, write); 636 else 637 return denali_pio_xfer(denali, buf, size, page, raw, write); 638 } 639 640 static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, 641 int page, int write) 642 { 643 struct denali_nand_info *denali = mtd_to_denali(mtd); 644 int writesize = mtd->writesize; 645 int oobsize = mtd->oobsize; 646 uint8_t *bufpoi = chip->oob_poi; 647 int ecc_steps = chip->ecc.steps; 648 int ecc_size = chip->ecc.size; 649 int ecc_bytes = chip->ecc.bytes; 650 int oob_skip = denali->oob_skip_bytes; 651 size_t size = writesize + oobsize; 652 int i, pos, len; 653 654 /* BBM at the beginning of the OOB area */ 655 if (write) 656 nand_prog_page_begin_op(chip, page, writesize, bufpoi, 657 oob_skip); 658 else 659 nand_read_page_op(chip, page, writesize, bufpoi, oob_skip); 660 bufpoi += oob_skip; 661 662 /* OOB ECC */ 663 for (i = 0; i < ecc_steps; i++) { 664 pos = ecc_size + i * (ecc_size + ecc_bytes); 665 len = ecc_bytes; 666 667 if (pos >= writesize) 668 pos += oob_skip; 669 else if (pos + len > writesize) 670 len = writesize - pos; 671 672 if (write) 673 nand_change_write_column_op(chip, pos, bufpoi, len, 674 false); 675 else 676 nand_change_read_column_op(chip, pos, bufpoi, len, 677 false); 678 bufpoi += len; 679 if (len < ecc_bytes) { 680 len = ecc_bytes - len; 681 if (write) 682 nand_change_write_column_op(chip, writesize + 683 oob_skip, bufpoi, 684 len, false); 685 else 686 nand_change_read_column_op(chip, writesize + 687 oob_skip, bufpoi, 688 len, false); 689 bufpoi += len; 690 } 691 } 692 693 /* OOB free */ 694 len = oobsize - (bufpoi - chip->oob_poi); 695 if (write) 696 nand_change_write_column_op(chip, size - len, bufpoi, len, 697 false); 698 else 699 nand_change_read_column_op(chip, size - len, bufpoi, len, 700 false); 701 } 702 703 static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 704 uint8_t *buf, int oob_required, int page) 705 { 706 struct denali_nand_info *denali = mtd_to_denali(mtd); 707 int writesize = mtd->writesize; 708 int oobsize = mtd->oobsize; 709 int ecc_steps = chip->ecc.steps; 710 int ecc_size = chip->ecc.size; 711 int ecc_bytes = chip->ecc.bytes; 712 void *tmp_buf = denali->buf; 713 int oob_skip = denali->oob_skip_bytes; 714 size_t size = writesize + oobsize; 715 int ret, i, pos, len; 716 717 ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0); 718 if (ret) 719 return ret; 720 721 /* Arrange the buffer for syndrome payload/ecc layout */ 722 if (buf) { 723 for (i = 0; i < ecc_steps; i++) { 724 pos = i * (ecc_size + ecc_bytes); 725 len = ecc_size; 726 727 if (pos >= writesize) 728 pos += oob_skip; 729 else if (pos + len > writesize) 730 len = writesize - pos; 731 732 memcpy(buf, tmp_buf + pos, len); 733 buf += len; 734 if (len < ecc_size) { 735 len = ecc_size - len; 736 memcpy(buf, tmp_buf + writesize + oob_skip, 737 len); 738 buf += len; 739 } 740 } 741 } 742 743 if (oob_required) { 744 uint8_t *oob = chip->oob_poi; 745 746 /* BBM at the beginning of the OOB area */ 747 memcpy(oob, tmp_buf + writesize, oob_skip); 748 oob += oob_skip; 749 750 /* OOB ECC */ 751 for (i = 0; i < ecc_steps; i++) { 752 pos = ecc_size + i * (ecc_size + ecc_bytes); 753 len = ecc_bytes; 754 755 if (pos >= writesize) 756 pos += oob_skip; 757 else if (pos + len > writesize) 758 len = writesize - pos; 759 760 memcpy(oob, tmp_buf + pos, len); 761 oob += len; 762 if (len < ecc_bytes) { 763 len = ecc_bytes - len; 764 memcpy(oob, tmp_buf + writesize + oob_skip, 765 len); 766 oob += len; 767 } 768 } 769 770 /* OOB free */ 771 len = oobsize - (oob - chip->oob_poi); 772 memcpy(oob, tmp_buf + size - len, len); 773 } 774 775 return 0; 776 } 777 778 static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 779 int page) 780 { 781 denali_oob_xfer(mtd, chip, page, 0); 782 783 return 0; 784 } 785 786 static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 787 int page) 788 { 789 struct denali_nand_info *denali = mtd_to_denali(mtd); 790 791 denali_reset_irq(denali); 792 793 denali_oob_xfer(mtd, chip, page, 1); 794 795 return nand_prog_page_end_op(chip); 796 } 797 798 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 799 uint8_t *buf, int oob_required, int page) 800 { 801 struct denali_nand_info *denali = mtd_to_denali(mtd); 802 unsigned long uncor_ecc_flags = 0; 803 int stat = 0; 804 int ret; 805 806 ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); 807 if (ret && ret != -EBADMSG) 808 return ret; 809 810 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 811 stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); 812 else if (ret == -EBADMSG) 813 stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); 814 815 if (stat < 0) 816 return stat; 817 818 if (uncor_ecc_flags) { 819 ret = denali_read_oob(mtd, chip, page); 820 if (ret) 821 return ret; 822 823 stat = denali_check_erased_page(mtd, chip, buf, 824 uncor_ecc_flags, stat); 825 } 826 827 return stat; 828 } 829 830 static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 831 const uint8_t *buf, int oob_required, int page) 832 { 833 struct denali_nand_info *denali = mtd_to_denali(mtd); 834 int writesize = mtd->writesize; 835 int oobsize = mtd->oobsize; 836 int ecc_steps = chip->ecc.steps; 837 int ecc_size = chip->ecc.size; 838 int ecc_bytes = chip->ecc.bytes; 839 void *tmp_buf = denali->buf; 840 int oob_skip = denali->oob_skip_bytes; 841 size_t size = writesize + oobsize; 842 int i, pos, len; 843 844 /* 845 * Fill the buffer with 0xff first except the full page transfer. 846 * This simplifies the logic. 847 */ 848 if (!buf || !oob_required) 849 memset(tmp_buf, 0xff, size); 850 851 /* Arrange the buffer for syndrome payload/ecc layout */ 852 if (buf) { 853 for (i = 0; i < ecc_steps; i++) { 854 pos = i * (ecc_size + ecc_bytes); 855 len = ecc_size; 856 857 if (pos >= writesize) 858 pos += oob_skip; 859 else if (pos + len > writesize) 860 len = writesize - pos; 861 862 memcpy(tmp_buf + pos, buf, len); 863 buf += len; 864 if (len < ecc_size) { 865 len = ecc_size - len; 866 memcpy(tmp_buf + writesize + oob_skip, buf, 867 len); 868 buf += len; 869 } 870 } 871 } 872 873 if (oob_required) { 874 const uint8_t *oob = chip->oob_poi; 875 876 /* BBM at the beginning of the OOB area */ 877 memcpy(tmp_buf + writesize, oob, oob_skip); 878 oob += oob_skip; 879 880 /* OOB ECC */ 881 for (i = 0; i < ecc_steps; i++) { 882 pos = ecc_size + i * (ecc_size + ecc_bytes); 883 len = ecc_bytes; 884 885 if (pos >= writesize) 886 pos += oob_skip; 887 else if (pos + len > writesize) 888 len = writesize - pos; 889 890 memcpy(tmp_buf + pos, oob, len); 891 oob += len; 892 if (len < ecc_bytes) { 893 len = ecc_bytes - len; 894 memcpy(tmp_buf + writesize + oob_skip, oob, 895 len); 896 oob += len; 897 } 898 } 899 900 /* OOB free */ 901 len = oobsize - (oob - chip->oob_poi); 902 memcpy(tmp_buf + size - len, oob, len); 903 } 904 905 return denali_data_xfer(denali, tmp_buf, size, page, 1, 1); 906 } 907 908 static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 909 const uint8_t *buf, int oob_required, int page) 910 { 911 struct denali_nand_info *denali = mtd_to_denali(mtd); 912 913 return denali_data_xfer(denali, (void *)buf, mtd->writesize, 914 page, 0, 1); 915 } 916 917 static void denali_select_chip(struct mtd_info *mtd, int chip) 918 { 919 struct denali_nand_info *denali = mtd_to_denali(mtd); 920 921 denali->active_bank = chip; 922 } 923 924 static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) 925 { 926 struct denali_nand_info *denali = mtd_to_denali(mtd); 927 uint32_t irq_status; 928 929 /* R/B# pin transitioned from low to high? */ 930 irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); 931 932 return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; 933 } 934 935 static int denali_erase(struct mtd_info *mtd, int page) 936 { 937 struct denali_nand_info *denali = mtd_to_denali(mtd); 938 uint32_t irq_status; 939 940 denali_reset_irq(denali); 941 942 denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, 943 DENALI_ERASE); 944 945 /* wait for erase to complete or failure to occur */ 946 irq_status = denali_wait_for_irq(denali, 947 INTR__ERASE_COMP | INTR__ERASE_FAIL); 948 949 return irq_status & INTR__ERASE_COMP ? 0 : -EIO; 950 } 951 952 static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, 953 const struct nand_data_interface *conf) 954 { 955 struct denali_nand_info *denali = mtd_to_denali(mtd); 956 const struct nand_sdr_timings *timings; 957 unsigned long t_clk; 958 int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; 959 int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; 960 int addr_2_data_mask; 961 uint32_t tmp; 962 963 timings = nand_get_sdr_timings(conf); 964 if (IS_ERR(timings)) 965 return PTR_ERR(timings); 966 967 /* clk_x period in picoseconds */ 968 t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); 969 if (!t_clk) 970 return -EINVAL; 971 972 if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) 973 return 0; 974 975 /* tREA -> ACC_CLKS */ 976 acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk); 977 acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); 978 979 tmp = ioread32(denali->reg + ACC_CLKS); 980 tmp &= ~ACC_CLKS__VALUE; 981 tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); 982 iowrite32(tmp, denali->reg + ACC_CLKS); 983 984 /* tRWH -> RE_2_WE */ 985 re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk); 986 re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); 987 988 tmp = ioread32(denali->reg + RE_2_WE); 989 tmp &= ~RE_2_WE__VALUE; 990 tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); 991 iowrite32(tmp, denali->reg + RE_2_WE); 992 993 /* tRHZ -> RE_2_RE */ 994 re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk); 995 re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); 996 997 tmp = ioread32(denali->reg + RE_2_RE); 998 tmp &= ~RE_2_RE__VALUE; 999 tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); 1000 iowrite32(tmp, denali->reg + RE_2_RE); 1001 1002 /* 1003 * tCCS, tWHR -> WE_2_RE 1004 * 1005 * With WE_2_RE properly set, the Denali controller automatically takes 1006 * care of the delay; the driver need not set NAND_WAIT_TCCS. 1007 */ 1008 we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), 1009 t_clk); 1010 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); 1011 1012 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); 1013 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; 1014 tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); 1015 iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); 1016 1017 /* tADL -> ADDR_2_DATA */ 1018 1019 /* for older versions, ADDR_2_DATA is only 6 bit wide */ 1020 addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1021 if (denali->revision < 0x0501) 1022 addr_2_data_mask >>= 1; 1023 1024 addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk); 1025 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); 1026 1027 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); 1028 tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1029 tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); 1030 iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); 1031 1032 /* tREH, tWH -> RDWR_EN_HI_CNT */ 1033 rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), 1034 t_clk); 1035 rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); 1036 1037 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); 1038 tmp &= ~RDWR_EN_HI_CNT__VALUE; 1039 tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); 1040 iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); 1041 1042 /* tRP, tWP -> RDWR_EN_LO_CNT */ 1043 rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), 1044 t_clk); 1045 rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), 1046 t_clk); 1047 rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT); 1048 rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); 1049 rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); 1050 1051 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); 1052 tmp &= ~RDWR_EN_LO_CNT__VALUE; 1053 tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); 1054 iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); 1055 1056 /* tCS, tCEA -> CS_SETUP_CNT */ 1057 cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo, 1058 (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks, 1059 0); 1060 cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); 1061 1062 tmp = ioread32(denali->reg + CS_SETUP_CNT); 1063 tmp &= ~CS_SETUP_CNT__VALUE; 1064 tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); 1065 iowrite32(tmp, denali->reg + CS_SETUP_CNT); 1066 1067 return 0; 1068 } 1069 1070 static void denali_reset_banks(struct denali_nand_info *denali) 1071 { 1072 u32 irq_status; 1073 int i; 1074 1075 for (i = 0; i < denali->max_banks; i++) { 1076 denali->active_bank = i; 1077 1078 denali_reset_irq(denali); 1079 1080 iowrite32(DEVICE_RESET__BANK(i), 1081 denali->reg + DEVICE_RESET); 1082 1083 irq_status = denali_wait_for_irq(denali, 1084 INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); 1085 if (!(irq_status & INTR__INT_ACT)) 1086 break; 1087 } 1088 1089 dev_dbg(denali->dev, "%d chips connected\n", i); 1090 denali->max_banks = i; 1091 } 1092 1093 static void denali_hw_init(struct denali_nand_info *denali) 1094 { 1095 /* 1096 * The REVISION register may not be reliable. Platforms are allowed to 1097 * override it. 1098 */ 1099 if (!denali->revision) 1100 denali->revision = swab16(ioread32(denali->reg + REVISION)); 1101 1102 /* 1103 * tell driver how many bit controller will skip before 1104 * writing ECC code in OOB, this register may be already 1105 * set by firmware. So we read this value out. 1106 * if this value is 0, just let it be. 1107 */ 1108 denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); 1109 denali_detect_max_banks(denali); 1110 iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); 1111 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); 1112 1113 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); 1114 } 1115 1116 int denali_calc_ecc_bytes(int step_size, int strength) 1117 { 1118 /* BCH code. Denali requires ecc.bytes to be multiple of 2 */ 1119 return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2; 1120 } 1121 EXPORT_SYMBOL(denali_calc_ecc_bytes); 1122 1123 static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, 1124 struct denali_nand_info *denali) 1125 { 1126 int oobavail = mtd->oobsize - denali->oob_skip_bytes; 1127 int ret; 1128 1129 /* 1130 * If .size and .strength are already set (usually by DT), 1131 * check if they are supported by this controller. 1132 */ 1133 if (chip->ecc.size && chip->ecc.strength) 1134 return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail); 1135 1136 /* 1137 * We want .size and .strength closest to the chip's requirement 1138 * unless NAND_ECC_MAXIMIZE is requested. 1139 */ 1140 if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) { 1141 ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail); 1142 if (!ret) 1143 return 0; 1144 } 1145 1146 /* Max ECC strength is the last thing we can do */ 1147 return nand_maximize_ecc(chip, denali->ecc_caps, oobavail); 1148 } 1149 1150 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, 1151 struct mtd_oob_region *oobregion) 1152 { 1153 struct denali_nand_info *denali = mtd_to_denali(mtd); 1154 struct nand_chip *chip = mtd_to_nand(mtd); 1155 1156 if (section) 1157 return -ERANGE; 1158 1159 oobregion->offset = denali->oob_skip_bytes; 1160 oobregion->length = chip->ecc.total; 1161 1162 return 0; 1163 } 1164 1165 static int denali_ooblayout_free(struct mtd_info *mtd, int section, 1166 struct mtd_oob_region *oobregion) 1167 { 1168 struct denali_nand_info *denali = mtd_to_denali(mtd); 1169 struct nand_chip *chip = mtd_to_nand(mtd); 1170 1171 if (section) 1172 return -ERANGE; 1173 1174 oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; 1175 oobregion->length = mtd->oobsize - oobregion->offset; 1176 1177 return 0; 1178 } 1179 1180 static const struct mtd_ooblayout_ops denali_ooblayout_ops = { 1181 .ecc = denali_ooblayout_ecc, 1182 .free = denali_ooblayout_free, 1183 }; 1184 1185 static int denali_multidev_fixup(struct denali_nand_info *denali) 1186 { 1187 struct nand_chip *chip = &denali->nand; 1188 struct mtd_info *mtd = nand_to_mtd(chip); 1189 1190 /* 1191 * Support for multi device: 1192 * When the IP configuration is x16 capable and two x8 chips are 1193 * connected in parallel, DEVICES_CONNECTED should be set to 2. 1194 * In this case, the core framework knows nothing about this fact, 1195 * so we should tell it the _logical_ pagesize and anything necessary. 1196 */ 1197 denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); 1198 1199 /* 1200 * On some SoCs, DEVICES_CONNECTED is not auto-detected. 1201 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. 1202 */ 1203 if (denali->devs_per_cs == 0) { 1204 denali->devs_per_cs = 1; 1205 iowrite32(1, denali->reg + DEVICES_CONNECTED); 1206 } 1207 1208 if (denali->devs_per_cs == 1) 1209 return 0; 1210 1211 if (denali->devs_per_cs != 2) { 1212 dev_err(denali->dev, "unsupported number of devices %d\n", 1213 denali->devs_per_cs); 1214 return -EINVAL; 1215 } 1216 1217 /* 2 chips in parallel */ 1218 mtd->size <<= 1; 1219 mtd->erasesize <<= 1; 1220 mtd->writesize <<= 1; 1221 mtd->oobsize <<= 1; 1222 chip->chipsize <<= 1; 1223 chip->page_shift += 1; 1224 chip->phys_erase_shift += 1; 1225 chip->bbt_erase_shift += 1; 1226 chip->chip_shift += 1; 1227 chip->pagemask <<= 1; 1228 chip->ecc.size <<= 1; 1229 chip->ecc.bytes <<= 1; 1230 chip->ecc.strength <<= 1; 1231 denali->oob_skip_bytes <<= 1; 1232 1233 return 0; 1234 } 1235 1236 int denali_init(struct denali_nand_info *denali) 1237 { 1238 struct nand_chip *chip = &denali->nand; 1239 struct mtd_info *mtd = nand_to_mtd(chip); 1240 u32 features = ioread32(denali->reg + FEATURES); 1241 int ret; 1242 1243 mtd->dev.parent = denali->dev; 1244 denali_hw_init(denali); 1245 1246 init_completion(&denali->complete); 1247 spin_lock_init(&denali->irq_lock); 1248 1249 denali_clear_irq_all(denali); 1250 1251 ret = devm_request_irq(denali->dev, denali->irq, denali_isr, 1252 IRQF_SHARED, DENALI_NAND_NAME, denali); 1253 if (ret) { 1254 dev_err(denali->dev, "Unable to request IRQ\n"); 1255 return ret; 1256 } 1257 1258 denali_enable_irq(denali); 1259 denali_reset_banks(denali); 1260 1261 denali->active_bank = DENALI_INVALID_BANK; 1262 1263 nand_set_flash_node(chip, denali->dev->of_node); 1264 /* Fallback to the default name if DT did not give "label" property */ 1265 if (!mtd->name) 1266 mtd->name = "denali-nand"; 1267 1268 chip->select_chip = denali_select_chip; 1269 chip->read_byte = denali_read_byte; 1270 chip->write_byte = denali_write_byte; 1271 chip->read_word = denali_read_word; 1272 chip->cmd_ctrl = denali_cmd_ctrl; 1273 chip->dev_ready = denali_dev_ready; 1274 chip->waitfunc = denali_waitfunc; 1275 1276 if (features & FEATURES__INDEX_ADDR) { 1277 denali->host_read = denali_indexed_read; 1278 denali->host_write = denali_indexed_write; 1279 } else { 1280 denali->host_read = denali_direct_read; 1281 denali->host_write = denali_direct_write; 1282 } 1283 1284 /* clk rate info is needed for setup_data_interface */ 1285 if (denali->clk_x_rate) 1286 chip->setup_data_interface = denali_setup_data_interface; 1287 1288 ret = nand_scan_ident(mtd, denali->max_banks, NULL); 1289 if (ret) 1290 goto disable_irq; 1291 1292 if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) 1293 denali->dma_avail = 1; 1294 1295 if (denali->dma_avail) { 1296 int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32; 1297 1298 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit)); 1299 if (ret) { 1300 dev_info(denali->dev, 1301 "Failed to set DMA mask. Disabling DMA.\n"); 1302 denali->dma_avail = 0; 1303 } 1304 } 1305 1306 if (denali->dma_avail) { 1307 chip->options |= NAND_USE_BOUNCE_BUFFER; 1308 chip->buf_align = 16; 1309 if (denali->caps & DENALI_CAP_DMA_64BIT) 1310 denali->setup_dma = denali_setup_dma64; 1311 else 1312 denali->setup_dma = denali_setup_dma32; 1313 } 1314 1315 chip->bbt_options |= NAND_BBT_USE_FLASH; 1316 chip->bbt_options |= NAND_BBT_NO_OOB; 1317 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 1318 chip->options |= NAND_NO_SUBPAGE_WRITE; 1319 1320 ret = denali_ecc_setup(mtd, chip, denali); 1321 if (ret) { 1322 dev_err(denali->dev, "Failed to setup ECC settings.\n"); 1323 goto disable_irq; 1324 } 1325 1326 dev_dbg(denali->dev, 1327 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 1328 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 1329 1330 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | 1331 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), 1332 denali->reg + ECC_CORRECTION); 1333 iowrite32(mtd->erasesize / mtd->writesize, 1334 denali->reg + PAGES_PER_BLOCK); 1335 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, 1336 denali->reg + DEVICE_WIDTH); 1337 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, 1338 denali->reg + TWO_ROW_ADDR_CYCLES); 1339 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); 1340 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); 1341 1342 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); 1343 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); 1344 /* chip->ecc.steps is set by nand_scan_tail(); not available here */ 1345 iowrite32(mtd->writesize / chip->ecc.size, 1346 denali->reg + CFG_NUM_DATA_BLOCKS); 1347 1348 mtd_set_ooblayout(mtd, &denali_ooblayout_ops); 1349 1350 if (chip->options & NAND_BUSWIDTH_16) { 1351 chip->read_buf = denali_read_buf16; 1352 chip->write_buf = denali_write_buf16; 1353 } else { 1354 chip->read_buf = denali_read_buf; 1355 chip->write_buf = denali_write_buf; 1356 } 1357 chip->ecc.read_page = denali_read_page; 1358 chip->ecc.read_page_raw = denali_read_page_raw; 1359 chip->ecc.write_page = denali_write_page; 1360 chip->ecc.write_page_raw = denali_write_page_raw; 1361 chip->ecc.read_oob = denali_read_oob; 1362 chip->ecc.write_oob = denali_write_oob; 1363 chip->erase = denali_erase; 1364 1365 ret = denali_multidev_fixup(denali); 1366 if (ret) 1367 goto disable_irq; 1368 1369 /* 1370 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not 1371 * use devm_kmalloc() because the memory allocated by devm_ does not 1372 * guarantee DMA-safe alignment. 1373 */ 1374 denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); 1375 if (!denali->buf) { 1376 ret = -ENOMEM; 1377 goto disable_irq; 1378 } 1379 1380 ret = nand_scan_tail(mtd); 1381 if (ret) 1382 goto free_buf; 1383 1384 ret = mtd_device_register(mtd, NULL, 0); 1385 if (ret) { 1386 dev_err(denali->dev, "Failed to register MTD: %d\n", ret); 1387 goto cleanup_nand; 1388 } 1389 return 0; 1390 1391 cleanup_nand: 1392 nand_cleanup(chip); 1393 free_buf: 1394 kfree(denali->buf); 1395 disable_irq: 1396 denali_disable_irq(denali); 1397 1398 return ret; 1399 } 1400 EXPORT_SYMBOL(denali_init); 1401 1402 void denali_remove(struct denali_nand_info *denali) 1403 { 1404 struct mtd_info *mtd = nand_to_mtd(&denali->nand); 1405 1406 nand_release(mtd); 1407 kfree(denali->buf); 1408 denali_disable_irq(denali); 1409 } 1410 EXPORT_SYMBOL(denali_remove); 1411