1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Freescale i.MX28 NAND flash driver 4 * 5 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com> 6 * on behalf of DENX Software Engineering GmbH 7 * 8 * Based on code from LTIB: 9 * Freescale GPMI NFC NAND Flash Driver 10 * 11 * Copyright (C) 2010 Freescale Semiconductor, Inc. 12 * Copyright (C) 2008 Embedded Alley Solutions, Inc. 13 */ 14 15 #include <common.h> 16 #include <dm.h> 17 #include <linux/mtd/rawnand.h> 18 #include <linux/sizes.h> 19 #include <linux/types.h> 20 #include <malloc.h> 21 #include <linux/errno.h> 22 #include <asm/io.h> 23 #include <asm/arch/clock.h> 24 #include <asm/arch/imx-regs.h> 25 #include <asm/mach-imx/regs-bch.h> 26 #include <asm/mach-imx/regs-gpmi.h> 27 #include <asm/arch/sys_proto.h> 28 #include "mxs_nand.h" 29 30 #define MXS_NAND_DMA_DESCRIPTOR_COUNT 4 31 32 #if (defined(CONFIG_MX6) || defined(CONFIG_MX7)) 33 #define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2 34 #else 35 #define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0 36 #endif 37 #define MXS_NAND_METADATA_SIZE 10 38 #define MXS_NAND_BITS_PER_ECC_LEVEL 13 39 40 #if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32 41 #define MXS_NAND_COMMAND_BUFFER_SIZE 32 42 #else 43 #define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE 44 #endif 45 46 #define MXS_NAND_BCH_TIMEOUT 10000 47 48 struct nand_ecclayout fake_ecc_layout; 49 50 /* 51 * Cache management functions 52 */ 53 #ifndef CONFIG_SYS_DCACHE_OFF 54 static void mxs_nand_flush_data_buf(struct mxs_nand_info *info) 55 { 56 uint32_t addr = (uint32_t)info->data_buf; 57 58 flush_dcache_range(addr, addr + info->data_buf_size); 59 } 60 61 static void mxs_nand_inval_data_buf(struct mxs_nand_info *info) 62 { 63 uint32_t addr = (uint32_t)info->data_buf; 64 65 invalidate_dcache_range(addr, addr + info->data_buf_size); 66 } 67 68 static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) 69 { 70 uint32_t addr = (uint32_t)info->cmd_buf; 71 72 flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE); 73 } 74 #else 75 static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {} 76 static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {} 77 static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {} 78 #endif 79 80 static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info) 81 { 82 struct mxs_dma_desc *desc; 83 84 if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) { 85 printf("MXS NAND: Too many DMA descriptors requested\n"); 86 return NULL; 87 } 88 89 desc = info->desc[info->desc_index]; 90 info->desc_index++; 91 92 return desc; 93 } 94 95 static void mxs_nand_return_dma_descs(struct mxs_nand_info *info) 96 { 97 int i; 98 struct mxs_dma_desc *desc; 99 100 for (i = 0; i < info->desc_index; i++) { 101 desc = info->desc[i]; 102 memset(desc, 0, sizeof(struct mxs_dma_desc)); 103 desc->address = (dma_addr_t)desc; 104 } 105 106 info->desc_index = 0; 107 } 108 109 static uint32_t mxs_nand_aux_status_offset(void) 110 { 111 return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3; 112 } 113 114 static inline int mxs_nand_calc_mark_offset(struct bch_geometry *geo, 115 uint32_t page_data_size) 116 { 117 uint32_t chunk_data_size_in_bits = geo->ecc_chunk_size * 8; 118 uint32_t chunk_ecc_size_in_bits = geo->ecc_strength * geo->gf_len; 119 uint32_t chunk_total_size_in_bits; 120 uint32_t block_mark_chunk_number; 121 uint32_t block_mark_chunk_bit_offset; 122 uint32_t block_mark_bit_offset; 123 124 chunk_total_size_in_bits = 125 chunk_data_size_in_bits + chunk_ecc_size_in_bits; 126 127 /* Compute the bit offset of the block mark within the physical page. */ 128 block_mark_bit_offset = page_data_size * 8; 129 130 /* Subtract the metadata bits. */ 131 block_mark_bit_offset -= MXS_NAND_METADATA_SIZE * 8; 132 133 /* 134 * Compute the chunk number (starting at zero) in which the block mark 135 * appears. 136 */ 137 block_mark_chunk_number = 138 block_mark_bit_offset / chunk_total_size_in_bits; 139 140 /* 141 * Compute the bit offset of the block mark within its chunk, and 142 * validate it. 143 */ 144 block_mark_chunk_bit_offset = block_mark_bit_offset - 145 (block_mark_chunk_number * chunk_total_size_in_bits); 146 147 if (block_mark_chunk_bit_offset > chunk_data_size_in_bits) 148 return -EINVAL; 149 150 /* 151 * Now that we know the chunk number in which the block mark appears, 152 * we can subtract all the ECC bits that appear before it. 153 */ 154 block_mark_bit_offset -= 155 block_mark_chunk_number * chunk_ecc_size_in_bits; 156 157 geo->block_mark_byte_offset = block_mark_bit_offset >> 3; 158 geo->block_mark_bit_offset = block_mark_bit_offset & 0x7; 159 160 return 0; 161 } 162 163 static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo, 164 struct mtd_info *mtd, 165 unsigned int ecc_strength, 166 unsigned int ecc_step) 167 { 168 struct nand_chip *chip = mtd_to_nand(mtd); 169 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 170 171 switch (ecc_step) { 172 case SZ_512: 173 geo->gf_len = 13; 174 break; 175 case SZ_1K: 176 geo->gf_len = 14; 177 break; 178 default: 179 return -EINVAL; 180 } 181 182 geo->ecc_chunk_size = ecc_step; 183 geo->ecc_strength = round_up(ecc_strength, 2); 184 185 /* Keep the C >= O */ 186 if (geo->ecc_chunk_size < mtd->oobsize) 187 return -EINVAL; 188 189 if (geo->ecc_strength > nand_info->max_ecc_strength_supported) 190 return -EINVAL; 191 192 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size; 193 194 return 0; 195 } 196 197 static inline int mxs_nand_calc_ecc_layout(struct bch_geometry *geo, 198 struct mtd_info *mtd) 199 { 200 struct nand_chip *chip = mtd_to_nand(mtd); 201 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 202 203 /* The default for the length of Galois Field. */ 204 geo->gf_len = 13; 205 206 /* The default for chunk size. */ 207 geo->ecc_chunk_size = 512; 208 209 if (geo->ecc_chunk_size < mtd->oobsize) { 210 geo->gf_len = 14; 211 geo->ecc_chunk_size *= 2; 212 } 213 214 if (mtd->oobsize > geo->ecc_chunk_size) { 215 printf("Not support the NAND chips whose oob size is larger then %d bytes!\n", 216 geo->ecc_chunk_size); 217 return -EINVAL; 218 } 219 220 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size; 221 222 /* 223 * Determine the ECC layout with the formula: 224 * ECC bits per chunk = (total page spare data bits) / 225 * (bits per ECC level) / (chunks per page) 226 * where: 227 * total page spare data bits = 228 * (page oob size - meta data size) * (bits per byte) 229 */ 230 geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8) 231 / (geo->gf_len * geo->ecc_chunk_count); 232 233 geo->ecc_strength = min(round_down(geo->ecc_strength, 2), 234 nand_info->max_ecc_strength_supported); 235 236 return 0; 237 } 238 239 /* 240 * Wait for BCH complete IRQ and clear the IRQ 241 */ 242 static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info) 243 { 244 int timeout = MXS_NAND_BCH_TIMEOUT; 245 int ret; 246 247 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg, 248 BCH_CTRL_COMPLETE_IRQ, timeout); 249 250 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr); 251 252 return ret; 253 } 254 255 /* 256 * This is the function that we install in the cmd_ctrl function pointer of the 257 * owning struct nand_chip. The only functions in the reference implementation 258 * that use these functions pointers are cmdfunc and select_chip. 259 * 260 * In this driver, we implement our own select_chip, so this function will only 261 * be called by the reference implementation's cmdfunc. For this reason, we can 262 * ignore the chip enable bit and concentrate only on sending bytes to the NAND 263 * Flash. 264 */ 265 static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl) 266 { 267 struct nand_chip *nand = mtd_to_nand(mtd); 268 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 269 struct mxs_dma_desc *d; 270 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 271 int ret; 272 273 /* 274 * If this condition is true, something is _VERY_ wrong in MTD 275 * subsystem! 276 */ 277 if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) { 278 printf("MXS NAND: Command queue too long\n"); 279 return; 280 } 281 282 /* 283 * Every operation begins with a command byte and a series of zero or 284 * more address bytes. These are distinguished by either the Address 285 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being 286 * asserted. When MTD is ready to execute the command, it will 287 * deasert both latch enables. 288 * 289 * Rather than run a separate DMA operation for every single byte, we 290 * queue them up and run a single DMA operation for the entire series 291 * of command and data bytes. 292 */ 293 if (ctrl & (NAND_ALE | NAND_CLE)) { 294 if (data != NAND_CMD_NONE) 295 nand_info->cmd_buf[nand_info->cmd_queue_len++] = data; 296 return; 297 } 298 299 /* 300 * If control arrives here, MTD has deasserted both the ALE and CLE, 301 * which means it's ready to run an operation. Check if we have any 302 * bytes to send. 303 */ 304 if (nand_info->cmd_queue_len == 0) 305 return; 306 307 /* Compile the DMA descriptor -- a descriptor that sends command. */ 308 d = mxs_nand_get_dma_desc(nand_info); 309 d->cmd.data = 310 MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ | 311 MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM | 312 MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 313 (nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET); 314 315 d->cmd.address = (dma_addr_t)nand_info->cmd_buf; 316 317 d->cmd.pio_words[0] = 318 GPMI_CTRL0_COMMAND_MODE_WRITE | 319 GPMI_CTRL0_WORD_LENGTH | 320 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 321 GPMI_CTRL0_ADDRESS_NAND_CLE | 322 GPMI_CTRL0_ADDRESS_INCREMENT | 323 nand_info->cmd_queue_len; 324 325 mxs_dma_desc_append(channel, d); 326 327 /* Flush caches */ 328 mxs_nand_flush_cmd_buf(nand_info); 329 330 /* Execute the DMA chain. */ 331 ret = mxs_dma_go(channel); 332 if (ret) 333 printf("MXS NAND: Error sending command\n"); 334 335 mxs_nand_return_dma_descs(nand_info); 336 337 /* Reset the command queue. */ 338 nand_info->cmd_queue_len = 0; 339 } 340 341 /* 342 * Test if the NAND flash is ready. 343 */ 344 static int mxs_nand_device_ready(struct mtd_info *mtd) 345 { 346 struct nand_chip *chip = mtd_to_nand(mtd); 347 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 348 uint32_t tmp; 349 350 tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat); 351 tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip); 352 353 return tmp & 1; 354 } 355 356 /* 357 * Select the NAND chip. 358 */ 359 static void mxs_nand_select_chip(struct mtd_info *mtd, int chip) 360 { 361 struct nand_chip *nand = mtd_to_nand(mtd); 362 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 363 364 nand_info->cur_chip = chip; 365 } 366 367 /* 368 * Handle block mark swapping. 369 * 370 * Note that, when this function is called, it doesn't know whether it's 371 * swapping the block mark, or swapping it *back* -- but it doesn't matter 372 * because the the operation is the same. 373 */ 374 static void mxs_nand_swap_block_mark(struct bch_geometry *geo, 375 uint8_t *data_buf, uint8_t *oob_buf) 376 { 377 uint32_t bit_offset = geo->block_mark_bit_offset; 378 uint32_t buf_offset = geo->block_mark_byte_offset; 379 380 uint32_t src; 381 uint32_t dst; 382 383 /* 384 * Get the byte from the data area that overlays the block mark. Since 385 * the ECC engine applies its own view to the bits in the page, the 386 * physical block mark won't (in general) appear on a byte boundary in 387 * the data. 388 */ 389 src = data_buf[buf_offset] >> bit_offset; 390 src |= data_buf[buf_offset + 1] << (8 - bit_offset); 391 392 dst = oob_buf[0]; 393 394 oob_buf[0] = src; 395 396 data_buf[buf_offset] &= ~(0xff << bit_offset); 397 data_buf[buf_offset + 1] &= 0xff << bit_offset; 398 399 data_buf[buf_offset] |= dst << bit_offset; 400 data_buf[buf_offset + 1] |= dst >> (8 - bit_offset); 401 } 402 403 /* 404 * Read data from NAND. 405 */ 406 static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length) 407 { 408 struct nand_chip *nand = mtd_to_nand(mtd); 409 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 410 struct mxs_dma_desc *d; 411 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 412 int ret; 413 414 if (length > NAND_MAX_PAGESIZE) { 415 printf("MXS NAND: DMA buffer too big\n"); 416 return; 417 } 418 419 if (!buf) { 420 printf("MXS NAND: DMA buffer is NULL\n"); 421 return; 422 } 423 424 /* Compile the DMA descriptor - a descriptor that reads data. */ 425 d = mxs_nand_get_dma_desc(nand_info); 426 d->cmd.data = 427 MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ | 428 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 429 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 430 (length << MXS_DMA_DESC_BYTES_OFFSET); 431 432 d->cmd.address = (dma_addr_t)nand_info->data_buf; 433 434 d->cmd.pio_words[0] = 435 GPMI_CTRL0_COMMAND_MODE_READ | 436 GPMI_CTRL0_WORD_LENGTH | 437 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 438 GPMI_CTRL0_ADDRESS_NAND_DATA | 439 length; 440 441 mxs_dma_desc_append(channel, d); 442 443 /* 444 * A DMA descriptor that waits for the command to end and the chip to 445 * become ready. 446 * 447 * I think we actually should *not* be waiting for the chip to become 448 * ready because, after all, we don't care. I think the original code 449 * did that and no one has re-thought it yet. 450 */ 451 d = mxs_nand_get_dma_desc(nand_info); 452 d->cmd.data = 453 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 454 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM | 455 MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 456 457 d->cmd.address = 0; 458 459 d->cmd.pio_words[0] = 460 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 461 GPMI_CTRL0_WORD_LENGTH | 462 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 463 GPMI_CTRL0_ADDRESS_NAND_DATA; 464 465 mxs_dma_desc_append(channel, d); 466 467 /* Invalidate caches */ 468 mxs_nand_inval_data_buf(nand_info); 469 470 /* Execute the DMA chain. */ 471 ret = mxs_dma_go(channel); 472 if (ret) { 473 printf("MXS NAND: DMA read error\n"); 474 goto rtn; 475 } 476 477 /* Invalidate caches */ 478 mxs_nand_inval_data_buf(nand_info); 479 480 memcpy(buf, nand_info->data_buf, length); 481 482 rtn: 483 mxs_nand_return_dma_descs(nand_info); 484 } 485 486 /* 487 * Write data to NAND. 488 */ 489 static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, 490 int length) 491 { 492 struct nand_chip *nand = mtd_to_nand(mtd); 493 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 494 struct mxs_dma_desc *d; 495 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 496 int ret; 497 498 if (length > NAND_MAX_PAGESIZE) { 499 printf("MXS NAND: DMA buffer too big\n"); 500 return; 501 } 502 503 if (!buf) { 504 printf("MXS NAND: DMA buffer is NULL\n"); 505 return; 506 } 507 508 memcpy(nand_info->data_buf, buf, length); 509 510 /* Compile the DMA descriptor - a descriptor that writes data. */ 511 d = mxs_nand_get_dma_desc(nand_info); 512 d->cmd.data = 513 MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ | 514 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 515 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 516 (length << MXS_DMA_DESC_BYTES_OFFSET); 517 518 d->cmd.address = (dma_addr_t)nand_info->data_buf; 519 520 d->cmd.pio_words[0] = 521 GPMI_CTRL0_COMMAND_MODE_WRITE | 522 GPMI_CTRL0_WORD_LENGTH | 523 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 524 GPMI_CTRL0_ADDRESS_NAND_DATA | 525 length; 526 527 mxs_dma_desc_append(channel, d); 528 529 /* Flush caches */ 530 mxs_nand_flush_data_buf(nand_info); 531 532 /* Execute the DMA chain. */ 533 ret = mxs_dma_go(channel); 534 if (ret) 535 printf("MXS NAND: DMA write error\n"); 536 537 mxs_nand_return_dma_descs(nand_info); 538 } 539 540 /* 541 * Read a single byte from NAND. 542 */ 543 static uint8_t mxs_nand_read_byte(struct mtd_info *mtd) 544 { 545 uint8_t buf; 546 mxs_nand_read_buf(mtd, &buf, 1); 547 return buf; 548 } 549 550 /* 551 * Read a page from NAND. 552 */ 553 static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand, 554 uint8_t *buf, int oob_required, 555 int page) 556 { 557 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 558 struct bch_geometry *geo = &nand_info->bch_geometry; 559 struct mxs_dma_desc *d; 560 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 561 uint32_t corrected = 0, failed = 0; 562 uint8_t *status; 563 int i, ret; 564 565 /* Compile the DMA descriptor - wait for ready. */ 566 d = mxs_nand_get_dma_desc(nand_info); 567 d->cmd.data = 568 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 569 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | 570 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 571 572 d->cmd.address = 0; 573 574 d->cmd.pio_words[0] = 575 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 576 GPMI_CTRL0_WORD_LENGTH | 577 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 578 GPMI_CTRL0_ADDRESS_NAND_DATA; 579 580 mxs_dma_desc_append(channel, d); 581 582 /* Compile the DMA descriptor - enable the BCH block and read. */ 583 d = mxs_nand_get_dma_desc(nand_info); 584 d->cmd.data = 585 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 586 MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 587 588 d->cmd.address = 0; 589 590 d->cmd.pio_words[0] = 591 GPMI_CTRL0_COMMAND_MODE_READ | 592 GPMI_CTRL0_WORD_LENGTH | 593 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 594 GPMI_CTRL0_ADDRESS_NAND_DATA | 595 (mtd->writesize + mtd->oobsize); 596 d->cmd.pio_words[1] = 0; 597 d->cmd.pio_words[2] = 598 GPMI_ECCCTRL_ENABLE_ECC | 599 GPMI_ECCCTRL_ECC_CMD_DECODE | 600 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; 601 d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize; 602 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; 603 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; 604 605 mxs_dma_desc_append(channel, d); 606 607 /* Compile the DMA descriptor - disable the BCH block. */ 608 d = mxs_nand_get_dma_desc(nand_info); 609 d->cmd.data = 610 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 611 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | 612 (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 613 614 d->cmd.address = 0; 615 616 d->cmd.pio_words[0] = 617 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 618 GPMI_CTRL0_WORD_LENGTH | 619 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 620 GPMI_CTRL0_ADDRESS_NAND_DATA | 621 (mtd->writesize + mtd->oobsize); 622 d->cmd.pio_words[1] = 0; 623 d->cmd.pio_words[2] = 0; 624 625 mxs_dma_desc_append(channel, d); 626 627 /* Compile the DMA descriptor - deassert the NAND lock and interrupt. */ 628 d = mxs_nand_get_dma_desc(nand_info); 629 d->cmd.data = 630 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 631 MXS_DMA_DESC_DEC_SEM; 632 633 d->cmd.address = 0; 634 635 mxs_dma_desc_append(channel, d); 636 637 /* Invalidate caches */ 638 mxs_nand_inval_data_buf(nand_info); 639 640 /* Execute the DMA chain. */ 641 ret = mxs_dma_go(channel); 642 if (ret) { 643 printf("MXS NAND: DMA read error\n"); 644 goto rtn; 645 } 646 647 ret = mxs_nand_wait_for_bch_complete(nand_info); 648 if (ret) { 649 printf("MXS NAND: BCH read timeout\n"); 650 goto rtn; 651 } 652 653 /* Invalidate caches */ 654 mxs_nand_inval_data_buf(nand_info); 655 656 /* Read DMA completed, now do the mark swapping. */ 657 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf); 658 659 /* Loop over status bytes, accumulating ECC status. */ 660 status = nand_info->oob_buf + mxs_nand_aux_status_offset(); 661 for (i = 0; i < geo->ecc_chunk_count; i++) { 662 if (status[i] == 0x00) 663 continue; 664 665 if (status[i] == 0xff) 666 continue; 667 668 if (status[i] == 0xfe) { 669 failed++; 670 continue; 671 } 672 673 corrected += status[i]; 674 } 675 676 /* Propagate ECC status to the owning MTD. */ 677 mtd->ecc_stats.failed += failed; 678 mtd->ecc_stats.corrected += corrected; 679 680 /* 681 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for 682 * details about our policy for delivering the OOB. 683 * 684 * We fill the caller's buffer with set bits, and then copy the block 685 * mark to the caller's buffer. Note that, if block mark swapping was 686 * necessary, it has already been done, so we can rely on the first 687 * byte of the auxiliary buffer to contain the block mark. 688 */ 689 memset(nand->oob_poi, 0xff, mtd->oobsize); 690 691 nand->oob_poi[0] = nand_info->oob_buf[0]; 692 693 memcpy(buf, nand_info->data_buf, mtd->writesize); 694 695 rtn: 696 mxs_nand_return_dma_descs(nand_info); 697 698 return ret; 699 } 700 701 /* 702 * Write a page to NAND. 703 */ 704 static int mxs_nand_ecc_write_page(struct mtd_info *mtd, 705 struct nand_chip *nand, const uint8_t *buf, 706 int oob_required, int page) 707 { 708 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 709 struct bch_geometry *geo = &nand_info->bch_geometry; 710 struct mxs_dma_desc *d; 711 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 712 int ret; 713 714 memcpy(nand_info->data_buf, buf, mtd->writesize); 715 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize); 716 717 /* Handle block mark swapping. */ 718 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf); 719 720 /* Compile the DMA descriptor - write data. */ 721 d = mxs_nand_get_dma_desc(nand_info); 722 d->cmd.data = 723 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 724 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 725 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 726 727 d->cmd.address = 0; 728 729 d->cmd.pio_words[0] = 730 GPMI_CTRL0_COMMAND_MODE_WRITE | 731 GPMI_CTRL0_WORD_LENGTH | 732 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 733 GPMI_CTRL0_ADDRESS_NAND_DATA; 734 d->cmd.pio_words[1] = 0; 735 d->cmd.pio_words[2] = 736 GPMI_ECCCTRL_ENABLE_ECC | 737 GPMI_ECCCTRL_ECC_CMD_ENCODE | 738 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; 739 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize); 740 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; 741 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; 742 743 mxs_dma_desc_append(channel, d); 744 745 /* Flush caches */ 746 mxs_nand_flush_data_buf(nand_info); 747 748 /* Execute the DMA chain. */ 749 ret = mxs_dma_go(channel); 750 if (ret) { 751 printf("MXS NAND: DMA write error\n"); 752 goto rtn; 753 } 754 755 ret = mxs_nand_wait_for_bch_complete(nand_info); 756 if (ret) { 757 printf("MXS NAND: BCH write timeout\n"); 758 goto rtn; 759 } 760 761 rtn: 762 mxs_nand_return_dma_descs(nand_info); 763 return 0; 764 } 765 766 /* 767 * Read OOB from NAND. 768 * 769 * This function is a veneer that replaces the function originally installed by 770 * the NAND Flash MTD code. 771 */ 772 static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from, 773 struct mtd_oob_ops *ops) 774 { 775 struct nand_chip *chip = mtd_to_nand(mtd); 776 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 777 int ret; 778 779 if (ops->mode == MTD_OPS_RAW) 780 nand_info->raw_oob_mode = 1; 781 else 782 nand_info->raw_oob_mode = 0; 783 784 ret = nand_info->hooked_read_oob(mtd, from, ops); 785 786 nand_info->raw_oob_mode = 0; 787 788 return ret; 789 } 790 791 /* 792 * Write OOB to NAND. 793 * 794 * This function is a veneer that replaces the function originally installed by 795 * the NAND Flash MTD code. 796 */ 797 static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to, 798 struct mtd_oob_ops *ops) 799 { 800 struct nand_chip *chip = mtd_to_nand(mtd); 801 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 802 int ret; 803 804 if (ops->mode == MTD_OPS_RAW) 805 nand_info->raw_oob_mode = 1; 806 else 807 nand_info->raw_oob_mode = 0; 808 809 ret = nand_info->hooked_write_oob(mtd, to, ops); 810 811 nand_info->raw_oob_mode = 0; 812 813 return ret; 814 } 815 816 /* 817 * Mark a block bad in NAND. 818 * 819 * This function is a veneer that replaces the function originally installed by 820 * the NAND Flash MTD code. 821 */ 822 static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs) 823 { 824 struct nand_chip *chip = mtd_to_nand(mtd); 825 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 826 int ret; 827 828 nand_info->marking_block_bad = 1; 829 830 ret = nand_info->hooked_block_markbad(mtd, ofs); 831 832 nand_info->marking_block_bad = 0; 833 834 return ret; 835 } 836 837 /* 838 * There are several places in this driver where we have to handle the OOB and 839 * block marks. This is the function where things are the most complicated, so 840 * this is where we try to explain it all. All the other places refer back to 841 * here. 842 * 843 * These are the rules, in order of decreasing importance: 844 * 845 * 1) Nothing the caller does can be allowed to imperil the block mark, so all 846 * write operations take measures to protect it. 847 * 848 * 2) In read operations, the first byte of the OOB we return must reflect the 849 * true state of the block mark, no matter where that block mark appears in 850 * the physical page. 851 * 852 * 3) ECC-based read operations return an OOB full of set bits (since we never 853 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads 854 * return). 855 * 856 * 4) "Raw" read operations return a direct view of the physical bytes in the 857 * page, using the conventional definition of which bytes are data and which 858 * are OOB. This gives the caller a way to see the actual, physical bytes 859 * in the page, without the distortions applied by our ECC engine. 860 * 861 * What we do for this specific read operation depends on whether we're doing 862 * "raw" read, or an ECC-based read. 863 * 864 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not 865 * easy. When reading a page, for example, the NAND Flash MTD code calls our 866 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an 867 * ECC-based or raw view of the page is implicit in which function it calls 868 * (there is a similar pair of ECC-based/raw functions for writing). 869 * 870 * Since MTD assumes the OOB is not covered by ECC, there is no pair of 871 * ECC-based/raw functions for reading or or writing the OOB. The fact that the 872 * caller wants an ECC-based or raw view of the page is not propagated down to 873 * this driver. 874 * 875 * Since our OOB *is* covered by ECC, we need this information. So, we hook the 876 * ecc.read_oob and ecc.write_oob function pointers in the owning 877 * struct mtd_info with our own functions. These hook functions set the 878 * raw_oob_mode field so that, when control finally arrives here, we'll know 879 * what to do. 880 */ 881 static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand, 882 int page) 883 { 884 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 885 886 /* 887 * First, fill in the OOB buffer. If we're doing a raw read, we need to 888 * get the bytes from the physical page. If we're not doing a raw read, 889 * we need to fill the buffer with set bits. 890 */ 891 if (nand_info->raw_oob_mode) { 892 /* 893 * If control arrives here, we're doing a "raw" read. Send the 894 * command to read the conventional OOB and read it. 895 */ 896 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 897 nand->read_buf(mtd, nand->oob_poi, mtd->oobsize); 898 } else { 899 /* 900 * If control arrives here, we're not doing a "raw" read. Fill 901 * the OOB buffer with set bits and correct the block mark. 902 */ 903 memset(nand->oob_poi, 0xff, mtd->oobsize); 904 905 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 906 mxs_nand_read_buf(mtd, nand->oob_poi, 1); 907 } 908 909 return 0; 910 911 } 912 913 /* 914 * Write OOB data to NAND. 915 */ 916 static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand, 917 int page) 918 { 919 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 920 uint8_t block_mark = 0; 921 922 /* 923 * There are fundamental incompatibilities between the i.MX GPMI NFC and 924 * the NAND Flash MTD model that make it essentially impossible to write 925 * the out-of-band bytes. 926 * 927 * We permit *ONE* exception. If the *intent* of writing the OOB is to 928 * mark a block bad, we can do that. 929 */ 930 931 if (!nand_info->marking_block_bad) { 932 printf("NXS NAND: Writing OOB isn't supported\n"); 933 return -EIO; 934 } 935 936 /* Write the block mark. */ 937 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page); 938 nand->write_buf(mtd, &block_mark, 1); 939 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 940 941 /* Check if it worked. */ 942 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL) 943 return -EIO; 944 945 return 0; 946 } 947 948 /* 949 * Claims all blocks are good. 950 * 951 * In principle, this function is *only* called when the NAND Flash MTD system 952 * isn't allowed to keep an in-memory bad block table, so it is forced to ask 953 * the driver for bad block information. 954 * 955 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so 956 * this function is *only* called when we take it away. 957 * 958 * Thus, this function is only called when we want *all* blocks to look good, 959 * so it *always* return success. 960 */ 961 static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs) 962 { 963 return 0; 964 } 965 966 static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo) 967 { 968 struct nand_chip *chip = mtd_to_nand(mtd); 969 struct nand_chip *nand = mtd_to_nand(mtd); 970 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 971 972 if (chip->ecc.strength > 0 && chip->ecc.size > 0) 973 return mxs_nand_calc_ecc_layout_by_info(geo, mtd, 974 chip->ecc.strength, chip->ecc.size); 975 976 if (nand_info->use_minimum_ecc || 977 mxs_nand_calc_ecc_layout(geo, mtd)) { 978 if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) 979 return -EINVAL; 980 981 return mxs_nand_calc_ecc_layout_by_info(geo, mtd, 982 chip->ecc_strength_ds, chip->ecc_step_ds); 983 } 984 985 return 0; 986 } 987 988 /* 989 * At this point, the physical NAND Flash chips have been identified and 990 * counted, so we know the physical geometry. This enables us to make some 991 * important configuration decisions. 992 * 993 * The return value of this function propagates directly back to this driver's 994 * board_nand_init(). Anything other than zero will cause this driver to 995 * tear everything down and declare failure. 996 */ 997 int mxs_nand_setup_ecc(struct mtd_info *mtd) 998 { 999 struct nand_chip *nand = mtd_to_nand(mtd); 1000 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1001 struct bch_geometry *geo = &nand_info->bch_geometry; 1002 struct mxs_bch_regs *bch_regs = nand_info->bch_regs; 1003 uint32_t tmp; 1004 int ret; 1005 1006 ret = mxs_nand_set_geometry(mtd, geo); 1007 if (ret) 1008 return ret; 1009 1010 mxs_nand_calc_mark_offset(geo, mtd->writesize); 1011 1012 /* Configure BCH and set NFC geometry */ 1013 mxs_reset_block(&bch_regs->hw_bch_ctrl_reg); 1014 1015 /* Configure layout 0 */ 1016 tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; 1017 tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; 1018 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET; 1019 tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; 1020 tmp |= (geo->gf_len == 14 ? 1 : 0) << 1021 BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; 1022 writel(tmp, &bch_regs->hw_bch_flash0layout0); 1023 1024 tmp = (mtd->writesize + mtd->oobsize) 1025 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; 1026 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET; 1027 tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; 1028 tmp |= (geo->gf_len == 14 ? 1 : 0) << 1029 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; 1030 writel(tmp, &bch_regs->hw_bch_flash0layout1); 1031 1032 /* Set *all* chip selects to use layout 0 */ 1033 writel(0, &bch_regs->hw_bch_layoutselect); 1034 1035 /* Enable BCH complete interrupt */ 1036 writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set); 1037 1038 /* Hook some operations at the MTD level. */ 1039 if (mtd->_read_oob != mxs_nand_hook_read_oob) { 1040 nand_info->hooked_read_oob = mtd->_read_oob; 1041 mtd->_read_oob = mxs_nand_hook_read_oob; 1042 } 1043 1044 if (mtd->_write_oob != mxs_nand_hook_write_oob) { 1045 nand_info->hooked_write_oob = mtd->_write_oob; 1046 mtd->_write_oob = mxs_nand_hook_write_oob; 1047 } 1048 1049 if (mtd->_block_markbad != mxs_nand_hook_block_markbad) { 1050 nand_info->hooked_block_markbad = mtd->_block_markbad; 1051 mtd->_block_markbad = mxs_nand_hook_block_markbad; 1052 } 1053 1054 return 0; 1055 } 1056 1057 /* 1058 * Allocate DMA buffers 1059 */ 1060 int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info) 1061 { 1062 uint8_t *buf; 1063 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE; 1064 1065 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT); 1066 1067 /* DMA buffers */ 1068 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size); 1069 if (!buf) { 1070 printf("MXS NAND: Error allocating DMA buffers\n"); 1071 return -ENOMEM; 1072 } 1073 1074 memset(buf, 0, nand_info->data_buf_size); 1075 1076 nand_info->data_buf = buf; 1077 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE; 1078 /* Command buffers */ 1079 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT, 1080 MXS_NAND_COMMAND_BUFFER_SIZE); 1081 if (!nand_info->cmd_buf) { 1082 free(buf); 1083 printf("MXS NAND: Error allocating command buffers\n"); 1084 return -ENOMEM; 1085 } 1086 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE); 1087 nand_info->cmd_queue_len = 0; 1088 1089 return 0; 1090 } 1091 1092 /* 1093 * Initializes the NFC hardware. 1094 */ 1095 static int mxs_nand_init_dma(struct mxs_nand_info *info) 1096 { 1097 int i = 0, j, ret = 0; 1098 1099 info->desc = malloc(sizeof(struct mxs_dma_desc *) * 1100 MXS_NAND_DMA_DESCRIPTOR_COUNT); 1101 if (!info->desc) { 1102 ret = -ENOMEM; 1103 goto err1; 1104 } 1105 1106 /* Allocate the DMA descriptors. */ 1107 for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) { 1108 info->desc[i] = mxs_dma_desc_alloc(); 1109 if (!info->desc[i]) { 1110 ret = -ENOMEM; 1111 goto err2; 1112 } 1113 } 1114 1115 /* Init the DMA controller. */ 1116 mxs_dma_init(); 1117 for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0; 1118 j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) { 1119 ret = mxs_dma_init_channel(j); 1120 if (ret) 1121 goto err3; 1122 } 1123 1124 /* Reset the GPMI block. */ 1125 mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg); 1126 mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg); 1127 1128 /* 1129 * Choose NAND mode, set IRQ polarity, disable write protection and 1130 * select BCH ECC. 1131 */ 1132 clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1, 1133 GPMI_CTRL1_GPMI_MODE, 1134 GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET | 1135 GPMI_CTRL1_BCH_MODE); 1136 1137 return 0; 1138 1139 err3: 1140 for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--) 1141 mxs_dma_release(j); 1142 err2: 1143 for (--i; i >= 0; i--) 1144 mxs_dma_desc_free(info->desc[i]); 1145 free(info->desc); 1146 err1: 1147 if (ret == -ENOMEM) 1148 printf("MXS NAND: Unable to allocate DMA descriptors\n"); 1149 return ret; 1150 } 1151 1152 int mxs_nand_init_spl(struct nand_chip *nand) 1153 { 1154 struct mxs_nand_info *nand_info; 1155 int err; 1156 1157 nand_info = malloc(sizeof(struct mxs_nand_info)); 1158 if (!nand_info) { 1159 printf("MXS NAND: Failed to allocate private data\n"); 1160 return -ENOMEM; 1161 } 1162 memset(nand_info, 0, sizeof(struct mxs_nand_info)); 1163 1164 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; 1165 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1166 1167 if (is_mx6sx() || is_mx7()) 1168 nand_info->max_ecc_strength_supported = 62; 1169 else 1170 nand_info->max_ecc_strength_supported = 40; 1171 1172 err = mxs_nand_alloc_buffers(nand_info); 1173 if (err) 1174 return err; 1175 1176 err = mxs_nand_init_dma(nand_info); 1177 if (err) 1178 return err; 1179 1180 nand_set_controller_data(nand, nand_info); 1181 1182 nand->options |= NAND_NO_SUBPAGE_WRITE; 1183 1184 nand->cmd_ctrl = mxs_nand_cmd_ctrl; 1185 nand->dev_ready = mxs_nand_device_ready; 1186 nand->select_chip = mxs_nand_select_chip; 1187 1188 nand->read_byte = mxs_nand_read_byte; 1189 nand->read_buf = mxs_nand_read_buf; 1190 1191 nand->ecc.read_page = mxs_nand_ecc_read_page; 1192 1193 nand->ecc.mode = NAND_ECC_HW; 1194 1195 return 0; 1196 } 1197 1198 int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info) 1199 { 1200 struct mtd_info *mtd; 1201 struct nand_chip *nand; 1202 int err; 1203 1204 nand = &nand_info->chip; 1205 mtd = nand_to_mtd(nand); 1206 err = mxs_nand_alloc_buffers(nand_info); 1207 if (err) 1208 return err; 1209 1210 err = mxs_nand_init_dma(nand_info); 1211 if (err) 1212 goto err_free_buffers; 1213 1214 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout)); 1215 1216 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT 1217 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; 1218 #endif 1219 1220 nand_set_controller_data(nand, nand_info); 1221 nand->options |= NAND_NO_SUBPAGE_WRITE; 1222 1223 if (nand_info->dev) 1224 nand->flash_node = dev_of_offset(nand_info->dev); 1225 1226 nand->cmd_ctrl = mxs_nand_cmd_ctrl; 1227 1228 nand->dev_ready = mxs_nand_device_ready; 1229 nand->select_chip = mxs_nand_select_chip; 1230 nand->block_bad = mxs_nand_block_bad; 1231 1232 nand->read_byte = mxs_nand_read_byte; 1233 1234 nand->read_buf = mxs_nand_read_buf; 1235 nand->write_buf = mxs_nand_write_buf; 1236 1237 /* first scan to find the device and get the page size */ 1238 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL)) 1239 goto err_free_buffers; 1240 1241 if (mxs_nand_setup_ecc(mtd)) 1242 goto err_free_buffers; 1243 1244 nand->ecc.read_page = mxs_nand_ecc_read_page; 1245 nand->ecc.write_page = mxs_nand_ecc_write_page; 1246 nand->ecc.read_oob = mxs_nand_ecc_read_oob; 1247 nand->ecc.write_oob = mxs_nand_ecc_write_oob; 1248 1249 nand->ecc.layout = &fake_ecc_layout; 1250 nand->ecc.mode = NAND_ECC_HW; 1251 nand->ecc.size = nand_info->bch_geometry.ecc_chunk_size; 1252 nand->ecc.strength = nand_info->bch_geometry.ecc_strength; 1253 1254 /* second phase scan */ 1255 err = nand_scan_tail(mtd); 1256 if (err) 1257 goto err_free_buffers; 1258 1259 err = nand_register(0, mtd); 1260 if (err) 1261 goto err_free_buffers; 1262 1263 return 0; 1264 1265 err_free_buffers: 1266 free(nand_info->data_buf); 1267 free(nand_info->cmd_buf); 1268 1269 return err; 1270 } 1271 1272 #ifndef CONFIG_NAND_MXS_DT 1273 void board_nand_init(void) 1274 { 1275 struct mxs_nand_info *nand_info; 1276 1277 nand_info = malloc(sizeof(struct mxs_nand_info)); 1278 if (!nand_info) { 1279 printf("MXS NAND: Failed to allocate private data\n"); 1280 return; 1281 } 1282 memset(nand_info, 0, sizeof(struct mxs_nand_info)); 1283 1284 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; 1285 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1286 1287 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */ 1288 if (is_mx6sx() || is_mx7()) 1289 nand_info->max_ecc_strength_supported = 62; 1290 else 1291 nand_info->max_ecc_strength_supported = 40; 1292 1293 #ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC 1294 nand_info->use_minimum_ecc = true; 1295 #endif 1296 1297 if (mxs_nand_init_ctrl(nand_info) < 0) 1298 goto err; 1299 1300 return; 1301 1302 err: 1303 free(nand_info); 1304 } 1305 #endif 1306