1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2014 Panasonic Corporation 4 * Copyright (C) 2013-2014, Altera Corporation <www.altera.com> 5 * Copyright (C) 2009-2010, Intel Corporation and its suppliers. 6 */ 7 8 #include <dm.h> 9 #include <nand.h> 10 #include <linux/bitfield.h> 11 #include <linux/dma-direction.h> 12 #include <linux/errno.h> 13 #include <linux/io.h> 14 #include <linux/mtd/mtd.h> 15 #include <linux/mtd/rawnand.h> 16 17 #include "denali.h" 18 19 static dma_addr_t dma_map_single(void *dev, void *ptr, size_t size, 20 enum dma_data_direction dir) 21 { 22 unsigned long addr = (unsigned long)ptr; 23 24 size = ALIGN(size, ARCH_DMA_MINALIGN); 25 26 if (dir == DMA_FROM_DEVICE) 27 invalidate_dcache_range(addr, addr + size); 28 else 29 flush_dcache_range(addr, addr + size); 30 31 return addr; 32 } 33 34 static void dma_unmap_single(void *dev, dma_addr_t addr, size_t size, 35 enum dma_data_direction dir) 36 { 37 size = ALIGN(size, ARCH_DMA_MINALIGN); 38 39 if (dir != DMA_TO_DEVICE) 40 invalidate_dcache_range(addr, addr + size); 41 } 42 43 static int dma_mapping_error(void *dev, dma_addr_t addr) 44 { 45 return 0; 46 } 47 48 #define DENALI_NAND_NAME "denali-nand" 49 50 /* for Indexed Addressing */ 51 #define DENALI_INDEXED_CTRL 0x00 52 #define DENALI_INDEXED_DATA 0x10 53 54 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ 55 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ 56 #define DENALI_MAP10 (2 << 26) /* high-level control plane */ 57 #define DENALI_MAP11 (3 << 26) /* direct controller access */ 58 59 /* MAP11 access cycle type */ 60 #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ 61 #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ 62 #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ 63 64 /* MAP10 commands */ 65 #define DENALI_ERASE 0x01 66 67 #define DENALI_BANK(denali) ((denali)->active_bank << 24) 68 69 #define DENALI_INVALID_BANK -1 70 #define DENALI_NR_BANKS 4 71 72 /* 73 * The bus interface clock, clk_x, is phase aligned with the core clock. The 74 * clk_x is an integral multiple N of the core clk. The value N is configured 75 * at IP delivery time, and its available value is 4, 5, or 6. We need to align 76 * to the largest value to make it work with any possible configuration. 77 */ 78 #define DENALI_CLK_X_MULT 6 79 80 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) 81 { 82 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); 83 } 84 85 /* 86 * Direct Addressing - the slave address forms the control information (command 87 * type, bank, block, and page address). The slave data is the actual data to 88 * be transferred. This mode requires 28 bits of address region allocated. 89 */ 90 static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) 91 { 92 return ioread32(denali->host + addr); 93 } 94 95 static void denali_direct_write(struct denali_nand_info *denali, u32 addr, 96 u32 data) 97 { 98 iowrite32(data, denali->host + addr); 99 } 100 101 /* 102 * Indexed Addressing - address translation module intervenes in passing the 103 * control information. This mode reduces the required address range. The 104 * control information and transferred data are latched by the registers in 105 * the translation module. 106 */ 107 static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) 108 { 109 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 110 return ioread32(denali->host + DENALI_INDEXED_DATA); 111 } 112 113 static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, 114 u32 data) 115 { 116 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 117 iowrite32(data, denali->host + DENALI_INDEXED_DATA); 118 } 119 120 /* 121 * Use the configuration feature register to determine the maximum number of 122 * banks that the hardware supports. 123 */ 124 static void denali_detect_max_banks(struct denali_nand_info *denali) 125 { 126 uint32_t features = ioread32(denali->reg + FEATURES); 127 128 denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); 129 130 /* the encoding changed from rev 5.0 to 5.1 */ 131 if (denali->revision < 0x0501) 132 denali->max_banks <<= 1; 133 } 134 135 static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali) 136 { 137 int i; 138 139 for (i = 0; i < DENALI_NR_BANKS; i++) 140 iowrite32(U32_MAX, denali->reg + INTR_EN(i)); 141 iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); 142 } 143 144 static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali) 145 { 146 int i; 147 148 for (i = 0; i < DENALI_NR_BANKS; i++) 149 iowrite32(0, denali->reg + INTR_EN(i)); 150 iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); 151 } 152 153 static void denali_clear_irq(struct denali_nand_info *denali, 154 int bank, uint32_t irq_status) 155 { 156 /* write one to clear bits */ 157 iowrite32(irq_status, denali->reg + INTR_STATUS(bank)); 158 } 159 160 static void denali_clear_irq_all(struct denali_nand_info *denali) 161 { 162 int i; 163 164 for (i = 0; i < DENALI_NR_BANKS; i++) 165 denali_clear_irq(denali, i, U32_MAX); 166 } 167 168 static void __denali_check_irq(struct denali_nand_info *denali) 169 { 170 uint32_t irq_status; 171 int i; 172 173 for (i = 0; i < DENALI_NR_BANKS; i++) { 174 irq_status = ioread32(denali->reg + INTR_STATUS(i)); 175 denali_clear_irq(denali, i, irq_status); 176 177 if (i != denali->active_bank) 178 continue; 179 180 denali->irq_status |= irq_status; 181 } 182 } 183 184 static void denali_reset_irq(struct denali_nand_info *denali) 185 { 186 denali->irq_status = 0; 187 denali->irq_mask = 0; 188 } 189 190 static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, 191 uint32_t irq_mask) 192 { 193 unsigned long time_left = 1000000; 194 195 while (time_left) { 196 __denali_check_irq(denali); 197 198 if (irq_mask & denali->irq_status) 199 return denali->irq_status; 200 udelay(1); 201 time_left--; 202 } 203 204 if (!time_left) { 205 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", 206 irq_mask); 207 return 0; 208 } 209 210 return denali->irq_status; 211 } 212 213 static uint32_t denali_check_irq(struct denali_nand_info *denali) 214 { 215 __denali_check_irq(denali); 216 217 return denali->irq_status; 218 } 219 220 static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 221 { 222 struct denali_nand_info *denali = mtd_to_denali(mtd); 223 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 224 int i; 225 226 for (i = 0; i < len; i++) 227 buf[i] = denali->host_read(denali, addr); 228 } 229 230 static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 231 { 232 struct denali_nand_info *denali = mtd_to_denali(mtd); 233 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 234 int i; 235 236 for (i = 0; i < len; i++) 237 denali->host_write(denali, addr, buf[i]); 238 } 239 240 static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 241 { 242 struct denali_nand_info *denali = mtd_to_denali(mtd); 243 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 244 uint16_t *buf16 = (uint16_t *)buf; 245 int i; 246 247 for (i = 0; i < len / 2; i++) 248 buf16[i] = denali->host_read(denali, addr); 249 } 250 251 static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, 252 int len) 253 { 254 struct denali_nand_info *denali = mtd_to_denali(mtd); 255 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 256 const uint16_t *buf16 = (const uint16_t *)buf; 257 int i; 258 259 for (i = 0; i < len / 2; i++) 260 denali->host_write(denali, addr, buf16[i]); 261 } 262 263 static uint8_t denali_read_byte(struct mtd_info *mtd) 264 { 265 uint8_t byte; 266 267 denali_read_buf(mtd, &byte, 1); 268 269 return byte; 270 } 271 272 static void denali_write_byte(struct mtd_info *mtd, uint8_t byte) 273 { 274 denali_write_buf(mtd, &byte, 1); 275 } 276 277 static uint16_t denali_read_word(struct mtd_info *mtd) 278 { 279 uint16_t word; 280 281 denali_read_buf16(mtd, (uint8_t *)&word, 2); 282 283 return word; 284 } 285 286 static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) 287 { 288 struct denali_nand_info *denali = mtd_to_denali(mtd); 289 uint32_t type; 290 291 if (ctrl & NAND_CLE) 292 type = DENALI_MAP11_CMD; 293 else if (ctrl & NAND_ALE) 294 type = DENALI_MAP11_ADDR; 295 else 296 return; 297 298 /* 299 * Some commands are followed by chip->dev_ready or chip->waitfunc. 300 * irq_status must be cleared here to catch the R/B# interrupt later. 301 */ 302 if (ctrl & NAND_CTRL_CHANGE) 303 denali_reset_irq(denali); 304 305 denali->host_write(denali, DENALI_BANK(denali) | type, dat); 306 } 307 308 static int denali_dev_ready(struct mtd_info *mtd) 309 { 310 struct denali_nand_info *denali = mtd_to_denali(mtd); 311 312 return !!(denali_check_irq(denali) & INTR__INT_ACT); 313 } 314 315 static int denali_check_erased_page(struct mtd_info *mtd, 316 struct nand_chip *chip, uint8_t *buf, 317 unsigned long uncor_ecc_flags, 318 unsigned int max_bitflips) 319 { 320 uint8_t *ecc_code = chip->buffers->ecccode; 321 int ecc_steps = chip->ecc.steps; 322 int ecc_size = chip->ecc.size; 323 int ecc_bytes = chip->ecc.bytes; 324 int i, ret, stat; 325 326 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 327 chip->ecc.total); 328 if (ret) 329 return ret; 330 331 for (i = 0; i < ecc_steps; i++) { 332 if (!(uncor_ecc_flags & BIT(i))) 333 continue; 334 335 stat = nand_check_erased_ecc_chunk(buf, ecc_size, 336 ecc_code, ecc_bytes, 337 NULL, 0, 338 chip->ecc.strength); 339 if (stat < 0) { 340 mtd->ecc_stats.failed++; 341 } else { 342 mtd->ecc_stats.corrected += stat; 343 max_bitflips = max_t(unsigned int, max_bitflips, stat); 344 } 345 346 buf += ecc_size; 347 ecc_code += ecc_bytes; 348 } 349 350 return max_bitflips; 351 } 352 353 static int denali_hw_ecc_fixup(struct mtd_info *mtd, 354 struct denali_nand_info *denali, 355 unsigned long *uncor_ecc_flags) 356 { 357 struct nand_chip *chip = mtd_to_nand(mtd); 358 int bank = denali->active_bank; 359 uint32_t ecc_cor; 360 unsigned int max_bitflips; 361 362 ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank)); 363 ecc_cor >>= ECC_COR_INFO__SHIFT(bank); 364 365 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { 366 /* 367 * This flag is set when uncorrectable error occurs at least in 368 * one ECC sector. We can not know "how many sectors", or 369 * "which sector(s)". We need erase-page check for all sectors. 370 */ 371 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0); 372 return 0; 373 } 374 375 max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); 376 377 /* 378 * The register holds the maximum of per-sector corrected bitflips. 379 * This is suitable for the return value of the ->read_page() callback. 380 * Unfortunately, we can not know the total number of corrected bits in 381 * the page. Increase the stats by max_bitflips. (compromised solution) 382 */ 383 mtd->ecc_stats.corrected += max_bitflips; 384 385 return max_bitflips; 386 } 387 388 static int denali_sw_ecc_fixup(struct mtd_info *mtd, 389 struct denali_nand_info *denali, 390 unsigned long *uncor_ecc_flags, uint8_t *buf) 391 { 392 unsigned int ecc_size = denali->nand.ecc.size; 393 unsigned int bitflips = 0; 394 unsigned int max_bitflips = 0; 395 uint32_t err_addr, err_cor_info; 396 unsigned int err_byte, err_sector, err_device; 397 uint8_t err_cor_value; 398 unsigned int prev_sector = 0; 399 uint32_t irq_status; 400 401 denali_reset_irq(denali); 402 403 do { 404 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); 405 err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); 406 err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); 407 408 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); 409 err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, 410 err_cor_info); 411 err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, 412 err_cor_info); 413 414 /* reset the bitflip counter when crossing ECC sector */ 415 if (err_sector != prev_sector) 416 bitflips = 0; 417 418 if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { 419 /* 420 * Check later if this is a real ECC error, or 421 * an erased sector. 422 */ 423 *uncor_ecc_flags |= BIT(err_sector); 424 } else if (err_byte < ecc_size) { 425 /* 426 * If err_byte is larger than ecc_size, means error 427 * happened in OOB, so we ignore it. It's no need for 428 * us to correct it err_device is represented the NAND 429 * error bits are happened in if there are more than 430 * one NAND connected. 431 */ 432 int offset; 433 unsigned int flips_in_byte; 434 435 offset = (err_sector * ecc_size + err_byte) * 436 denali->devs_per_cs + err_device; 437 438 /* correct the ECC error */ 439 flips_in_byte = hweight8(buf[offset] ^ err_cor_value); 440 buf[offset] ^= err_cor_value; 441 mtd->ecc_stats.corrected += flips_in_byte; 442 bitflips += flips_in_byte; 443 444 max_bitflips = max(max_bitflips, bitflips); 445 } 446 447 prev_sector = err_sector; 448 } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); 449 450 /* 451 * Once handle all ECC errors, controller will trigger an 452 * ECC_TRANSACTION_DONE interrupt. 453 */ 454 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); 455 if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) 456 return -EIO; 457 458 return max_bitflips; 459 } 460 461 static void denali_setup_dma64(struct denali_nand_info *denali, 462 dma_addr_t dma_addr, int page, int write) 463 { 464 uint32_t mode; 465 const int page_count = 1; 466 467 mode = DENALI_MAP10 | DENALI_BANK(denali) | page; 468 469 /* DMA is a three step process */ 470 471 /* 472 * 1. setup transfer type, interrupt when complete, 473 * burst len = 64 bytes, the number of pages 474 */ 475 denali->host_write(denali, mode, 476 0x01002000 | (64 << 16) | (write << 8) | page_count); 477 478 /* 2. set memory low address */ 479 denali->host_write(denali, mode, lower_32_bits(dma_addr)); 480 481 /* 3. set memory high address */ 482 denali->host_write(denali, mode, upper_32_bits(dma_addr)); 483 } 484 485 static void denali_setup_dma32(struct denali_nand_info *denali, 486 dma_addr_t dma_addr, int page, int write) 487 { 488 uint32_t mode; 489 const int page_count = 1; 490 491 mode = DENALI_MAP10 | DENALI_BANK(denali); 492 493 /* DMA is a four step process */ 494 495 /* 1. setup transfer type and # of pages */ 496 denali->host_write(denali, mode | page, 497 0x2000 | (write << 8) | page_count); 498 499 /* 2. set memory high address bits 23:8 */ 500 denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 501 502 /* 3. set memory low address bits 23:8 */ 503 denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 504 505 /* 4. interrupt when complete, burst len = 64 bytes */ 506 denali->host_write(denali, mode | 0x14000, 0x2400); 507 } 508 509 static int denali_pio_read(struct denali_nand_info *denali, void *buf, 510 size_t size, int page, int raw) 511 { 512 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 513 uint32_t *buf32 = (uint32_t *)buf; 514 uint32_t irq_status, ecc_err_mask; 515 int i; 516 517 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 518 ecc_err_mask = INTR__ECC_UNCOR_ERR; 519 else 520 ecc_err_mask = INTR__ECC_ERR; 521 522 denali_reset_irq(denali); 523 524 for (i = 0; i < size / 4; i++) 525 *buf32++ = denali->host_read(denali, addr); 526 527 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); 528 if (!(irq_status & INTR__PAGE_XFER_INC)) 529 return -EIO; 530 531 if (irq_status & INTR__ERASED_PAGE) 532 memset(buf, 0xff, size); 533 534 return irq_status & ecc_err_mask ? -EBADMSG : 0; 535 } 536 537 static int denali_pio_write(struct denali_nand_info *denali, 538 const void *buf, size_t size, int page, int raw) 539 { 540 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 541 const uint32_t *buf32 = (uint32_t *)buf; 542 uint32_t irq_status; 543 int i; 544 545 denali_reset_irq(denali); 546 547 for (i = 0; i < size / 4; i++) 548 denali->host_write(denali, addr, *buf32++); 549 550 irq_status = denali_wait_for_irq(denali, 551 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); 552 if (!(irq_status & INTR__PROGRAM_COMP)) 553 return -EIO; 554 555 return 0; 556 } 557 558 static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, 559 size_t size, int page, int raw, int write) 560 { 561 if (write) 562 return denali_pio_write(denali, buf, size, page, raw); 563 else 564 return denali_pio_read(denali, buf, size, page, raw); 565 } 566 567 static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, 568 size_t size, int page, int raw, int write) 569 { 570 dma_addr_t dma_addr; 571 uint32_t irq_mask, irq_status, ecc_err_mask; 572 enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 573 int ret = 0; 574 575 dma_addr = dma_map_single(denali->dev, buf, size, dir); 576 if (dma_mapping_error(denali->dev, dma_addr)) { 577 dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); 578 return denali_pio_xfer(denali, buf, size, page, raw, write); 579 } 580 581 if (write) { 582 /* 583 * INTR__PROGRAM_COMP is never asserted for the DMA transfer. 584 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted 585 * when the page program is completed. 586 */ 587 irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; 588 ecc_err_mask = 0; 589 } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) { 590 irq_mask = INTR__DMA_CMD_COMP; 591 ecc_err_mask = INTR__ECC_UNCOR_ERR; 592 } else { 593 irq_mask = INTR__DMA_CMD_COMP; 594 ecc_err_mask = INTR__ECC_ERR; 595 } 596 597 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 598 599 denali_reset_irq(denali); 600 denali->setup_dma(denali, dma_addr, page, write); 601 602 irq_status = denali_wait_for_irq(denali, irq_mask); 603 if (!(irq_status & INTR__DMA_CMD_COMP)) 604 ret = -EIO; 605 else if (irq_status & ecc_err_mask) 606 ret = -EBADMSG; 607 608 iowrite32(0, denali->reg + DMA_ENABLE); 609 610 dma_unmap_single(denali->dev, dma_addr, size, dir); 611 612 if (irq_status & INTR__ERASED_PAGE) 613 memset(buf, 0xff, size); 614 615 return ret; 616 } 617 618 static int denali_data_xfer(struct denali_nand_info *denali, void *buf, 619 size_t size, int page, int raw, int write) 620 { 621 iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); 622 iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, 623 denali->reg + TRANSFER_SPARE_REG); 624 625 if (denali->dma_avail) 626 return denali_dma_xfer(denali, buf, size, page, raw, write); 627 else 628 return denali_pio_xfer(denali, buf, size, page, raw, write); 629 } 630 631 static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, 632 int page, int write) 633 { 634 struct denali_nand_info *denali = mtd_to_denali(mtd); 635 unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0; 636 unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT; 637 int writesize = mtd->writesize; 638 int oobsize = mtd->oobsize; 639 uint8_t *bufpoi = chip->oob_poi; 640 int ecc_steps = chip->ecc.steps; 641 int ecc_size = chip->ecc.size; 642 int ecc_bytes = chip->ecc.bytes; 643 int oob_skip = denali->oob_skip_bytes; 644 size_t size = writesize + oobsize; 645 int i, pos, len; 646 647 /* BBM at the beginning of the OOB area */ 648 chip->cmdfunc(mtd, start_cmd, writesize, page); 649 if (write) 650 chip->write_buf(mtd, bufpoi, oob_skip); 651 else 652 chip->read_buf(mtd, bufpoi, oob_skip); 653 bufpoi += oob_skip; 654 655 /* OOB ECC */ 656 for (i = 0; i < ecc_steps; i++) { 657 pos = ecc_size + i * (ecc_size + ecc_bytes); 658 len = ecc_bytes; 659 660 if (pos >= writesize) 661 pos += oob_skip; 662 else if (pos + len > writesize) 663 len = writesize - pos; 664 665 chip->cmdfunc(mtd, rnd_cmd, pos, -1); 666 if (write) 667 chip->write_buf(mtd, bufpoi, len); 668 else 669 chip->read_buf(mtd, bufpoi, len); 670 bufpoi += len; 671 if (len < ecc_bytes) { 672 len = ecc_bytes - len; 673 chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1); 674 if (write) 675 chip->write_buf(mtd, bufpoi, len); 676 else 677 chip->read_buf(mtd, bufpoi, len); 678 bufpoi += len; 679 } 680 } 681 682 /* OOB free */ 683 len = oobsize - (bufpoi - chip->oob_poi); 684 chip->cmdfunc(mtd, rnd_cmd, size - len, -1); 685 if (write) 686 chip->write_buf(mtd, bufpoi, len); 687 else 688 chip->read_buf(mtd, bufpoi, len); 689 } 690 691 static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 692 uint8_t *buf, int oob_required, int page) 693 { 694 struct denali_nand_info *denali = mtd_to_denali(mtd); 695 int writesize = mtd->writesize; 696 int oobsize = mtd->oobsize; 697 int ecc_steps = chip->ecc.steps; 698 int ecc_size = chip->ecc.size; 699 int ecc_bytes = chip->ecc.bytes; 700 void *tmp_buf = denali->buf; 701 int oob_skip = denali->oob_skip_bytes; 702 size_t size = writesize + oobsize; 703 int ret, i, pos, len; 704 705 ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0); 706 if (ret) 707 return ret; 708 709 /* Arrange the buffer for syndrome payload/ecc layout */ 710 if (buf) { 711 for (i = 0; i < ecc_steps; i++) { 712 pos = i * (ecc_size + ecc_bytes); 713 len = ecc_size; 714 715 if (pos >= writesize) 716 pos += oob_skip; 717 else if (pos + len > writesize) 718 len = writesize - pos; 719 720 memcpy(buf, tmp_buf + pos, len); 721 buf += len; 722 if (len < ecc_size) { 723 len = ecc_size - len; 724 memcpy(buf, tmp_buf + writesize + oob_skip, 725 len); 726 buf += len; 727 } 728 } 729 } 730 731 if (oob_required) { 732 uint8_t *oob = chip->oob_poi; 733 734 /* BBM at the beginning of the OOB area */ 735 memcpy(oob, tmp_buf + writesize, oob_skip); 736 oob += oob_skip; 737 738 /* OOB ECC */ 739 for (i = 0; i < ecc_steps; i++) { 740 pos = ecc_size + i * (ecc_size + ecc_bytes); 741 len = ecc_bytes; 742 743 if (pos >= writesize) 744 pos += oob_skip; 745 else if (pos + len > writesize) 746 len = writesize - pos; 747 748 memcpy(oob, tmp_buf + pos, len); 749 oob += len; 750 if (len < ecc_bytes) { 751 len = ecc_bytes - len; 752 memcpy(oob, tmp_buf + writesize + oob_skip, 753 len); 754 oob += len; 755 } 756 } 757 758 /* OOB free */ 759 len = oobsize - (oob - chip->oob_poi); 760 memcpy(oob, tmp_buf + size - len, len); 761 } 762 763 return 0; 764 } 765 766 static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 767 int page) 768 { 769 denali_oob_xfer(mtd, chip, page, 0); 770 771 return 0; 772 } 773 774 static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 775 int page) 776 { 777 struct denali_nand_info *denali = mtd_to_denali(mtd); 778 int status; 779 780 denali_reset_irq(denali); 781 782 denali_oob_xfer(mtd, chip, page, 1); 783 784 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 785 status = chip->waitfunc(mtd, chip); 786 787 return status & NAND_STATUS_FAIL ? -EIO : 0; 788 } 789 790 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 791 uint8_t *buf, int oob_required, int page) 792 { 793 struct denali_nand_info *denali = mtd_to_denali(mtd); 794 unsigned long uncor_ecc_flags = 0; 795 int stat = 0; 796 int ret; 797 798 ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); 799 if (ret && ret != -EBADMSG) 800 return ret; 801 802 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 803 stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); 804 else if (ret == -EBADMSG) 805 stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); 806 807 if (stat < 0) 808 return stat; 809 810 if (uncor_ecc_flags) { 811 ret = denali_read_oob(mtd, chip, page); 812 if (ret) 813 return ret; 814 815 stat = denali_check_erased_page(mtd, chip, buf, 816 uncor_ecc_flags, stat); 817 } 818 819 return stat; 820 } 821 822 static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 823 const uint8_t *buf, int oob_required, int page) 824 { 825 struct denali_nand_info *denali = mtd_to_denali(mtd); 826 int writesize = mtd->writesize; 827 int oobsize = mtd->oobsize; 828 int ecc_steps = chip->ecc.steps; 829 int ecc_size = chip->ecc.size; 830 int ecc_bytes = chip->ecc.bytes; 831 void *tmp_buf = denali->buf; 832 int oob_skip = denali->oob_skip_bytes; 833 size_t size = writesize + oobsize; 834 int i, pos, len; 835 836 /* 837 * Fill the buffer with 0xff first except the full page transfer. 838 * This simplifies the logic. 839 */ 840 if (!buf || !oob_required) 841 memset(tmp_buf, 0xff, size); 842 843 /* Arrange the buffer for syndrome payload/ecc layout */ 844 if (buf) { 845 for (i = 0; i < ecc_steps; i++) { 846 pos = i * (ecc_size + ecc_bytes); 847 len = ecc_size; 848 849 if (pos >= writesize) 850 pos += oob_skip; 851 else if (pos + len > writesize) 852 len = writesize - pos; 853 854 memcpy(tmp_buf + pos, buf, len); 855 buf += len; 856 if (len < ecc_size) { 857 len = ecc_size - len; 858 memcpy(tmp_buf + writesize + oob_skip, buf, 859 len); 860 buf += len; 861 } 862 } 863 } 864 865 if (oob_required) { 866 const uint8_t *oob = chip->oob_poi; 867 868 /* BBM at the beginning of the OOB area */ 869 memcpy(tmp_buf + writesize, oob, oob_skip); 870 oob += oob_skip; 871 872 /* OOB ECC */ 873 for (i = 0; i < ecc_steps; i++) { 874 pos = ecc_size + i * (ecc_size + ecc_bytes); 875 len = ecc_bytes; 876 877 if (pos >= writesize) 878 pos += oob_skip; 879 else if (pos + len > writesize) 880 len = writesize - pos; 881 882 memcpy(tmp_buf + pos, oob, len); 883 oob += len; 884 if (len < ecc_bytes) { 885 len = ecc_bytes - len; 886 memcpy(tmp_buf + writesize + oob_skip, oob, 887 len); 888 oob += len; 889 } 890 } 891 892 /* OOB free */ 893 len = oobsize - (oob - chip->oob_poi); 894 memcpy(tmp_buf + size - len, oob, len); 895 } 896 897 return denali_data_xfer(denali, tmp_buf, size, page, 1, 1); 898 } 899 900 static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 901 const uint8_t *buf, int oob_required, int page) 902 { 903 struct denali_nand_info *denali = mtd_to_denali(mtd); 904 905 return denali_data_xfer(denali, (void *)buf, mtd->writesize, 906 page, 0, 1); 907 } 908 909 static void denali_select_chip(struct mtd_info *mtd, int chip) 910 { 911 struct denali_nand_info *denali = mtd_to_denali(mtd); 912 913 denali->active_bank = chip; 914 } 915 916 static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) 917 { 918 struct denali_nand_info *denali = mtd_to_denali(mtd); 919 uint32_t irq_status; 920 921 /* R/B# pin transitioned from low to high? */ 922 irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); 923 924 return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; 925 } 926 927 static int denali_erase(struct mtd_info *mtd, int page) 928 { 929 struct denali_nand_info *denali = mtd_to_denali(mtd); 930 uint32_t irq_status; 931 932 denali_reset_irq(denali); 933 934 denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, 935 DENALI_ERASE); 936 937 /* wait for erase to complete or failure to occur */ 938 irq_status = denali_wait_for_irq(denali, 939 INTR__ERASE_COMP | INTR__ERASE_FAIL); 940 941 return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL; 942 } 943 944 static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, 945 const struct nand_data_interface *conf) 946 { 947 struct denali_nand_info *denali = mtd_to_denali(mtd); 948 const struct nand_sdr_timings *timings; 949 unsigned long t_clk; 950 int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; 951 int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; 952 int addr_2_data_mask; 953 uint32_t tmp; 954 955 timings = nand_get_sdr_timings(conf); 956 if (IS_ERR(timings)) 957 return PTR_ERR(timings); 958 959 /* clk_x period in picoseconds */ 960 t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); 961 if (!t_clk) 962 return -EINVAL; 963 964 if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) 965 return 0; 966 967 /* tREA -> ACC_CLKS */ 968 acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk); 969 acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); 970 971 tmp = ioread32(denali->reg + ACC_CLKS); 972 tmp &= ~ACC_CLKS__VALUE; 973 tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); 974 iowrite32(tmp, denali->reg + ACC_CLKS); 975 976 /* tRWH -> RE_2_WE */ 977 re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk); 978 re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); 979 980 tmp = ioread32(denali->reg + RE_2_WE); 981 tmp &= ~RE_2_WE__VALUE; 982 tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); 983 iowrite32(tmp, denali->reg + RE_2_WE); 984 985 /* tRHZ -> RE_2_RE */ 986 re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk); 987 re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); 988 989 tmp = ioread32(denali->reg + RE_2_RE); 990 tmp &= ~RE_2_RE__VALUE; 991 tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); 992 iowrite32(tmp, denali->reg + RE_2_RE); 993 994 /* 995 * tCCS, tWHR -> WE_2_RE 996 * 997 * With WE_2_RE properly set, the Denali controller automatically takes 998 * care of the delay; the driver need not set NAND_WAIT_TCCS. 999 */ 1000 we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), 1001 t_clk); 1002 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); 1003 1004 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); 1005 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; 1006 tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); 1007 iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); 1008 1009 /* tADL -> ADDR_2_DATA */ 1010 1011 /* for older versions, ADDR_2_DATA is only 6 bit wide */ 1012 addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1013 if (denali->revision < 0x0501) 1014 addr_2_data_mask >>= 1; 1015 1016 addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk); 1017 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); 1018 1019 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); 1020 tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1021 tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); 1022 iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); 1023 1024 /* tREH, tWH -> RDWR_EN_HI_CNT */ 1025 rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), 1026 t_clk); 1027 rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); 1028 1029 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); 1030 tmp &= ~RDWR_EN_HI_CNT__VALUE; 1031 tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); 1032 iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); 1033 1034 /* tRP, tWP -> RDWR_EN_LO_CNT */ 1035 rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), 1036 t_clk); 1037 rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), 1038 t_clk); 1039 rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT); 1040 rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); 1041 rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); 1042 1043 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); 1044 tmp &= ~RDWR_EN_LO_CNT__VALUE; 1045 tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); 1046 iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); 1047 1048 /* tCS, tCEA -> CS_SETUP_CNT */ 1049 cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo, 1050 (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks, 1051 0); 1052 cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); 1053 1054 tmp = ioread32(denali->reg + CS_SETUP_CNT); 1055 tmp &= ~CS_SETUP_CNT__VALUE; 1056 tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); 1057 iowrite32(tmp, denali->reg + CS_SETUP_CNT); 1058 1059 return 0; 1060 } 1061 1062 static void denali_reset_banks(struct denali_nand_info *denali) 1063 { 1064 u32 irq_status; 1065 int i; 1066 1067 for (i = 0; i < denali->max_banks; i++) { 1068 denali->active_bank = i; 1069 1070 denali_reset_irq(denali); 1071 1072 iowrite32(DEVICE_RESET__BANK(i), 1073 denali->reg + DEVICE_RESET); 1074 1075 irq_status = denali_wait_for_irq(denali, 1076 INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); 1077 if (!(irq_status & INTR__INT_ACT)) 1078 break; 1079 } 1080 1081 dev_dbg(denali->dev, "%d chips connected\n", i); 1082 denali->max_banks = i; 1083 } 1084 1085 static void denali_hw_init(struct denali_nand_info *denali) 1086 { 1087 /* 1088 * The REVISION register may not be reliable. Platforms are allowed to 1089 * override it. 1090 */ 1091 if (!denali->revision) 1092 denali->revision = swab16(ioread32(denali->reg + REVISION)); 1093 1094 /* 1095 * tell driver how many bit controller will skip before writing 1096 * ECC code in OOB. This is normally used for bad block marker 1097 */ 1098 denali->oob_skip_bytes = CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES; 1099 iowrite32(denali->oob_skip_bytes, denali->reg + SPARE_AREA_SKIP_BYTES); 1100 denali_detect_max_banks(denali); 1101 iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); 1102 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); 1103 1104 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); 1105 } 1106 1107 int denali_calc_ecc_bytes(int step_size, int strength) 1108 { 1109 /* BCH code. Denali requires ecc.bytes to be multiple of 2 */ 1110 return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2; 1111 } 1112 EXPORT_SYMBOL(denali_calc_ecc_bytes); 1113 1114 static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, 1115 struct denali_nand_info *denali) 1116 { 1117 int oobavail = mtd->oobsize - denali->oob_skip_bytes; 1118 int ret; 1119 1120 /* 1121 * If .size and .strength are already set (usually by DT), 1122 * check if they are supported by this controller. 1123 */ 1124 if (chip->ecc.size && chip->ecc.strength) 1125 return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail); 1126 1127 /* 1128 * We want .size and .strength closest to the chip's requirement 1129 * unless NAND_ECC_MAXIMIZE is requested. 1130 */ 1131 if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) { 1132 ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail); 1133 if (!ret) 1134 return 0; 1135 } 1136 1137 /* Max ECC strength is the last thing we can do */ 1138 return nand_maximize_ecc(chip, denali->ecc_caps, oobavail); 1139 } 1140 1141 static struct nand_ecclayout nand_oob; 1142 1143 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, 1144 struct mtd_oob_region *oobregion) 1145 { 1146 struct denali_nand_info *denali = mtd_to_denali(mtd); 1147 struct nand_chip *chip = mtd_to_nand(mtd); 1148 1149 if (section) 1150 return -ERANGE; 1151 1152 oobregion->offset = denali->oob_skip_bytes; 1153 oobregion->length = chip->ecc.total; 1154 1155 return 0; 1156 } 1157 1158 static int denali_ooblayout_free(struct mtd_info *mtd, int section, 1159 struct mtd_oob_region *oobregion) 1160 { 1161 struct denali_nand_info *denali = mtd_to_denali(mtd); 1162 struct nand_chip *chip = mtd_to_nand(mtd); 1163 1164 if (section) 1165 return -ERANGE; 1166 1167 oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; 1168 oobregion->length = mtd->oobsize - oobregion->offset; 1169 1170 return 0; 1171 } 1172 1173 static const struct mtd_ooblayout_ops denali_ooblayout_ops = { 1174 .ecc = denali_ooblayout_ecc, 1175 .free = denali_ooblayout_free, 1176 }; 1177 1178 static int denali_multidev_fixup(struct denali_nand_info *denali) 1179 { 1180 struct nand_chip *chip = &denali->nand; 1181 struct mtd_info *mtd = nand_to_mtd(chip); 1182 1183 /* 1184 * Support for multi device: 1185 * When the IP configuration is x16 capable and two x8 chips are 1186 * connected in parallel, DEVICES_CONNECTED should be set to 2. 1187 * In this case, the core framework knows nothing about this fact, 1188 * so we should tell it the _logical_ pagesize and anything necessary. 1189 */ 1190 denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); 1191 1192 /* 1193 * On some SoCs, DEVICES_CONNECTED is not auto-detected. 1194 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. 1195 */ 1196 if (denali->devs_per_cs == 0) { 1197 denali->devs_per_cs = 1; 1198 iowrite32(1, denali->reg + DEVICES_CONNECTED); 1199 } 1200 1201 if (denali->devs_per_cs == 1) 1202 return 0; 1203 1204 if (denali->devs_per_cs != 2) { 1205 dev_err(denali->dev, "unsupported number of devices %d\n", 1206 denali->devs_per_cs); 1207 return -EINVAL; 1208 } 1209 1210 /* 2 chips in parallel */ 1211 mtd->size <<= 1; 1212 mtd->erasesize <<= 1; 1213 mtd->writesize <<= 1; 1214 mtd->oobsize <<= 1; 1215 chip->chipsize <<= 1; 1216 chip->page_shift += 1; 1217 chip->phys_erase_shift += 1; 1218 chip->bbt_erase_shift += 1; 1219 chip->chip_shift += 1; 1220 chip->pagemask <<= 1; 1221 chip->ecc.size <<= 1; 1222 chip->ecc.bytes <<= 1; 1223 chip->ecc.strength <<= 1; 1224 denali->oob_skip_bytes <<= 1; 1225 1226 return 0; 1227 } 1228 1229 int denali_init(struct denali_nand_info *denali) 1230 { 1231 struct nand_chip *chip = &denali->nand; 1232 struct mtd_info *mtd = nand_to_mtd(chip); 1233 u32 features = ioread32(denali->reg + FEATURES); 1234 int ret; 1235 1236 denali_hw_init(denali); 1237 1238 denali_clear_irq_all(denali); 1239 1240 denali_reset_banks(denali); 1241 1242 denali->active_bank = DENALI_INVALID_BANK; 1243 1244 chip->flash_node = dev_of_offset(denali->dev); 1245 /* Fallback to the default name if DT did not give "label" property */ 1246 if (!mtd->name) 1247 mtd->name = "denali-nand"; 1248 1249 chip->select_chip = denali_select_chip; 1250 chip->read_byte = denali_read_byte; 1251 chip->write_byte = denali_write_byte; 1252 chip->read_word = denali_read_word; 1253 chip->cmd_ctrl = denali_cmd_ctrl; 1254 chip->dev_ready = denali_dev_ready; 1255 chip->waitfunc = denali_waitfunc; 1256 1257 if (features & FEATURES__INDEX_ADDR) { 1258 denali->host_read = denali_indexed_read; 1259 denali->host_write = denali_indexed_write; 1260 } else { 1261 denali->host_read = denali_direct_read; 1262 denali->host_write = denali_direct_write; 1263 } 1264 1265 /* clk rate info is needed for setup_data_interface */ 1266 if (denali->clk_x_rate) 1267 chip->setup_data_interface = denali_setup_data_interface; 1268 1269 ret = nand_scan_ident(mtd, denali->max_banks, NULL); 1270 if (ret) 1271 return ret; 1272 1273 if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) 1274 denali->dma_avail = 1; 1275 1276 if (denali->dma_avail) { 1277 chip->buf_align = ARCH_DMA_MINALIGN; 1278 if (denali->caps & DENALI_CAP_DMA_64BIT) 1279 denali->setup_dma = denali_setup_dma64; 1280 else 1281 denali->setup_dma = denali_setup_dma32; 1282 } else { 1283 chip->buf_align = 4; 1284 } 1285 1286 chip->options |= NAND_USE_BOUNCE_BUFFER; 1287 chip->bbt_options |= NAND_BBT_USE_FLASH; 1288 chip->bbt_options |= NAND_BBT_NO_OOB; 1289 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 1290 1291 /* no subpage writes on denali */ 1292 chip->options |= NAND_NO_SUBPAGE_WRITE; 1293 1294 ret = denali_ecc_setup(mtd, chip, denali); 1295 if (ret) { 1296 dev_err(denali->dev, "Failed to setup ECC settings.\n"); 1297 return ret; 1298 } 1299 1300 dev_dbg(denali->dev, 1301 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 1302 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 1303 1304 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | 1305 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), 1306 denali->reg + ECC_CORRECTION); 1307 iowrite32(mtd->erasesize / mtd->writesize, 1308 denali->reg + PAGES_PER_BLOCK); 1309 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, 1310 denali->reg + DEVICE_WIDTH); 1311 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, 1312 denali->reg + TWO_ROW_ADDR_CYCLES); 1313 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); 1314 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); 1315 1316 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); 1317 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); 1318 /* chip->ecc.steps is set by nand_scan_tail(); not available here */ 1319 iowrite32(mtd->writesize / chip->ecc.size, 1320 denali->reg + CFG_NUM_DATA_BLOCKS); 1321 1322 mtd_set_ooblayout(mtd, &denali_ooblayout_ops); 1323 1324 nand_oob.eccbytes = denali->nand.ecc.bytes; 1325 denali->nand.ecc.layout = &nand_oob; 1326 1327 if (chip->options & NAND_BUSWIDTH_16) { 1328 chip->read_buf = denali_read_buf16; 1329 chip->write_buf = denali_write_buf16; 1330 } else { 1331 chip->read_buf = denali_read_buf; 1332 chip->write_buf = denali_write_buf; 1333 } 1334 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; 1335 chip->ecc.read_page = denali_read_page; 1336 chip->ecc.read_page_raw = denali_read_page_raw; 1337 chip->ecc.write_page = denali_write_page; 1338 chip->ecc.write_page_raw = denali_write_page_raw; 1339 chip->ecc.read_oob = denali_read_oob; 1340 chip->ecc.write_oob = denali_write_oob; 1341 chip->erase = denali_erase; 1342 1343 ret = denali_multidev_fixup(denali); 1344 if (ret) 1345 return ret; 1346 1347 /* 1348 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not 1349 * use devm_kmalloc() because the memory allocated by devm_ does not 1350 * guarantee DMA-safe alignment. 1351 */ 1352 denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); 1353 if (!denali->buf) 1354 return -ENOMEM; 1355 1356 ret = nand_scan_tail(mtd); 1357 if (ret) 1358 goto free_buf; 1359 1360 ret = nand_register(0, mtd); 1361 if (ret) { 1362 dev_err(denali->dev, "Failed to register MTD: %d\n", ret); 1363 goto free_buf; 1364 } 1365 return 0; 1366 1367 free_buf: 1368 kfree(denali->buf); 1369 1370 return ret; 1371 } 1372