// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma/mxs-dma.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"

/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"

/* Converts time to clock cycles */
#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)

/* Offsets of the atomic set/clear mirrors of an MXS-style register. */
#define MXS_SET_ADDR 0x4
#define MXS_CLR_ADDR 0x8
/*
 * Clear the bit and poll it cleared. This is usually called with
 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
 * (bit 30).
 *
 * Returns 0 when the bit cleared in time, non-zero on poll timeout.
 */
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
        int timeout = 0x400;

        /* clear the bit */
        writel(mask, addr + MXS_CLR_ADDR);

        /*
         * SFTRST needs 3 GPMI clocks to settle, the reference manual
         * recommends to wait 1us.
         */
        udelay(1);

        /* poll the bit becoming clear */
        while ((readl(addr) & mask) && --timeout)
                /* nothing */;

        /* !timeout == 1 means the bit never cleared: caller treats as error */
        return !timeout;
}

#define MODULE_CLKGATE (1 << 30)
#define MODULE_SFTRST (1 << 31)
/*
 * The current mxs_reset_block() will do two things:
 * [1] enable the module.
 * [2] reset the module.
 *
 * In most of the cases, it's ok.
 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
 * If you try to soft reset the BCH block, it becomes unusable until
 * the next hard reset.
 * This case occurs in the NAND boot mode. When the board
 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
 * So if the driver tries to reset the BCH again, the BCH will not work anymore.
 * You will see a DMA timeout in this case. The bug has been fixed
 * in the following chips, such as MX28.
 *
 * To avoid this bug, just add a new parameter `just_enable` for
 * the mxs_reset_block(), and rewrite it here.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
        int ret;
        int timeout = 0x400;

        /* clear and poll SFTRST */
        ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
        if (unlikely(ret))
                goto error;

        /* clear CLKGATE */
        writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

        if (!just_enable) {
                /* set SFTRST to reset the block */
                writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
                udelay(1);

                /* poll CLKGATE becoming set (asserted by the soft reset) */
                while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
                        /* nothing */;
                if (unlikely(!timeout))
                        goto error;
        }

        /* clear and poll SFTRST */
        ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
        if (unlikely(ret))
                goto error;

        /* clear and poll CLKGATE */
        ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
        if (unlikely(ret))
                goto error;

        return 0;

error:
        pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
        return -ETIMEDOUT;
}

/*
 * Enable (v == true) or disable (v == false) all clocks registered in
 * this->resources.clock[]. On an enable failure, the clocks enabled so
 * far are disabled again before returning the error.
 */
static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
        struct clk *clk;
        int ret;
        int i;

        for (i = 0; i < GPMI_CLK_MAX; i++) {
                clk = this->resources.clock[i];
                if (!clk)
                        break;

                if (v) {
                        ret = clk_prepare_enable(clk);
                        if (ret)
                                goto err_clk;
                } else {
                        clk_disable_unprepare(clk);
                }
        }
        return 0;

err_clk:
        /* unwind: disable only the clocks we managed to enable */
        for (; i > 0; i--)
                clk_disable_unprepare(this->resources.clock[i - 1]);
        return ret;
}

/* Bring the GPMI block out of reset and configure it for BCH NAND mode. */
static int
gpmi_init(struct gpmi_nand_data *this) 147 { 148 struct resources *r = &this->resources; 149 int ret; 150 151 ret = pm_runtime_get_sync(this->dev); 152 if (ret < 0) 153 return ret; 154 155 ret = gpmi_reset_block(r->gpmi_regs, false); 156 if (ret) 157 goto err_out; 158 159 /* 160 * Reset BCH here, too. We got failures otherwise :( 161 * See later BCH reset for explanation of MX23 and MX28 handling 162 */ 163 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this)); 164 if (ret) 165 goto err_out; 166 167 /* Choose NAND mode. */ 168 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); 169 170 /* Set the IRQ polarity. */ 171 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY, 172 r->gpmi_regs + HW_GPMI_CTRL1_SET); 173 174 /* Disable Write-Protection. */ 175 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET); 176 177 /* Select BCH ECC. */ 178 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); 179 180 /* 181 * Decouple the chip select from dma channel. We use dma0 for all 182 * the chips. 183 */ 184 writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); 185 186 err_out: 187 pm_runtime_mark_last_busy(this->dev); 188 pm_runtime_put_autosuspend(this->dev); 189 return ret; 190 } 191 192 /* This function is very useful. It is called only when the bug occur. 
 */
static void gpmi_dump_info(struct gpmi_nand_data *this)
{
        struct resources *r = &this->resources;
        struct bch_geometry *geo = &this->bch_geometry;
        u32 reg;
        int i;

        /* Dump every GPMI register up to (and slightly past) HW_GPMI_DEBUG. */
        dev_err(this->dev, "Show GPMI registers :\n");
        for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
                reg = readl(r->gpmi_regs + i * 0x10);
                dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
        }

        /* start to print out the BCH info */
        dev_err(this->dev, "Show BCH registers :\n");
        for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
                reg = readl(r->bch_regs + i * 0x10);
                dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
        }
        dev_err(this->dev, "BCH Geometry :\n"
                "GF length : %u\n"
                "ECC Strength : %u\n"
                "Page Size in Bytes : %u\n"
                "Metadata Size in Bytes : %u\n"
                "ECC Chunk Size in Bytes: %u\n"
                "ECC Chunk Count : %u\n"
                "Payload Size in Bytes : %u\n"
                "Auxiliary Size in Bytes: %u\n"
                "Auxiliary Status Offset: %u\n"
                "Block Mark Byte Offset : %u\n"
                "Block Mark Bit Offset : %u\n",
                geo->gf_len,
                geo->ecc_strength,
                geo->page_size,
                geo->metadata_size,
                geo->ecc_chunk_size,
                geo->ecc_chunk_count,
                geo->payload_size,
                geo->auxiliary_size,
                geo->auxiliary_status_offset,
                geo->block_mark_byte_offset,
                geo->block_mark_bit_offset);
}

/*
 * Sanity-check the requested BCH geometry against the controller's
 * capabilities. Returns true when the geometry is usable.
 */
static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
        struct bch_geometry *geo = &this->bch_geometry;

        /* Do the sanity check. */
        if (GPMI_IS_MXS(this)) {
                /* The mx23/mx28 only support the GF13. */
                if (geo->gf_len == 14)
                        return false;
        }
        return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
}

/*
 * If we can get the ECC information from the nand chip, we do not
 * need to calculate them ourselves.
 *
 * We may have available oob space in this case.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
                                    unsigned int ecc_strength,
                                    unsigned int ecc_step)
{
        struct bch_geometry *geo = &this->bch_geometry;
        struct nand_chip *chip = &this->nand;
        struct mtd_info *mtd = nand_to_mtd(chip);
        unsigned int block_mark_bit_offset;

        /* The Galois field width follows the chip-reported ECC step size. */
        switch (ecc_step) {
        case SZ_512:
                geo->gf_len = 13;
                break;
        case SZ_1K:
                geo->gf_len = 14;
                break;
        default:
                dev_err(this->dev,
                        "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
                        chip->base.eccreq.strength,
                        chip->base.eccreq.step_size);
                return -EINVAL;
        }
        geo->ecc_chunk_size = ecc_step;
        /* The BCH block works with even ECC strengths only. */
        geo->ecc_strength = round_up(ecc_strength, 2);
        if (!gpmi_check_ecc(this))
                return -EINVAL;

        /* Keep the C >= O */
        if (geo->ecc_chunk_size < mtd->oobsize) {
                dev_err(this->dev,
                        "unsupported nand chip. ecc size: %d, oob size : %d\n",
                        ecc_step, mtd->oobsize);
                return -EINVAL;
        }

        /* The default value, see comment in the legacy_set_geometry(). */
        geo->metadata_size = 10;

        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

        /*
         * The ECC-based view of a NAND page with 2K page (512-byte data
         * chunks) looks like:
         *
         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
         *
         * P : the page size for BCH module.
         * E : The ECC strength.
         * G : the length of Galois Field.
         * N : The chunk count of per page.
         * M : the metasize of per page.
         * C : the ecc chunk size, aka the "data" above.
         * P': the nand chip's page size.
         * O : the nand chip's oob size.
         * O': the free oob.
         *
         * The formula for P is :
         *
         *            E * G * N
         *       P = ------------ + P' + M
         *                8
         *
         * The position of block mark moves forward in the ECC-based view
         * of page, and the delta is:
         *
         *            E * G * (N - 1)
         *       D = (---------------- + M)
         *                   8
         *
         * Please see the comment in legacy_set_geometry().
         * With the condition C >= O , we still can get same result.
         * So the bit position of the physical block mark within the ECC-based
         * view of the page is :
         *             (P' - D) * 8
         */
        geo->page_size = mtd->writesize + geo->metadata_size +
                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

        geo->payload_size = mtd->writesize;

        /* Metadata is padded to 32 bits; ECC status follows it. */
        geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
        geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
                + ALIGN(geo->ecc_chunk_count, 4);

        if (!this->swap_block_mark)
                return 0;

        /* For bit swap. */
        block_mark_bit_offset = mtd->writesize * 8 -
                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
                 + geo->metadata_size * 8);

        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
        geo->block_mark_bit_offset = block_mark_bit_offset % 8;
        return 0;
}

/*
 * Calculate the ECC strength by hand:
 * E : The ECC strength.
 * G : the length of Galois Field.
 * N : The chunk count of per page.
 * O : the oobsize of the NAND chip.
 * M : the metasize of per page.
 *
 * The formula is :
 *         E * G * N
 *        ------------ <= (O - M)
 *             8
 *
 * So, we get E by:
 *                    (O - M) * 8
 *         E <=  -------------
 *                    G * N
 */
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
        struct bch_geometry *geo = &this->bch_geometry;
        struct mtd_info *mtd = nand_to_mtd(&this->nand);
        int ecc_strength;

        ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
                / (geo->gf_len * geo->ecc_chunk_count);

        /* We need the minor even number. */
        return round_down(ecc_strength, 2);
}

/*
 * Compute the BCH geometry from the page/oob size alone, used when no
 * per-chip ECC requirement is available.
 */
static int legacy_set_geometry(struct gpmi_nand_data *this)
{
        struct bch_geometry *geo = &this->bch_geometry;
        struct mtd_info *mtd = nand_to_mtd(&this->nand);
        unsigned int metadata_size;
        unsigned int status_size;
        unsigned int block_mark_bit_offset;

        /*
         * The size of the metadata can be changed, though we set it to 10
         * bytes now. But it can't be too large, because we have to save
         * enough space for BCH.
         */
        geo->metadata_size = 10;

        /* The default for the length of Galois Field. */
        geo->gf_len = 13;

        /* The default for chunk size. */
        geo->ecc_chunk_size = 512;
        while (geo->ecc_chunk_size < mtd->oobsize) {
                geo->ecc_chunk_size *= 2; /* keep C >= O */
                geo->gf_len = 14;
        }

        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

        /* We use the same ECC strength for all chunks.
*/ 426 geo->ecc_strength = get_ecc_strength(this); 427 if (!gpmi_check_ecc(this)) { 428 dev_err(this->dev, 429 "ecc strength: %d cannot be supported by the controller (%d)\n" 430 "try to use minimum ecc strength that NAND chip required\n", 431 geo->ecc_strength, 432 this->devdata->bch_max_ecc_strength); 433 return -EINVAL; 434 } 435 436 geo->page_size = mtd->writesize + geo->metadata_size + 437 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8; 438 geo->payload_size = mtd->writesize; 439 440 /* 441 * The auxiliary buffer contains the metadata and the ECC status. The 442 * metadata is padded to the nearest 32-bit boundary. The ECC status 443 * contains one byte for every ECC chunk, and is also padded to the 444 * nearest 32-bit boundary. 445 */ 446 metadata_size = ALIGN(geo->metadata_size, 4); 447 status_size = ALIGN(geo->ecc_chunk_count, 4); 448 449 geo->auxiliary_size = metadata_size + status_size; 450 geo->auxiliary_status_offset = metadata_size; 451 452 if (!this->swap_block_mark) 453 return 0; 454 455 /* 456 * We need to compute the byte and bit offsets of 457 * the physical block mark within the ECC-based view of the page. 
458 * 459 * NAND chip with 2K page shows below: 460 * (Block Mark) 461 * | | 462 * | D | 463 * |<---->| 464 * V V 465 * +---+----------+-+----------+-+----------+-+----------+-+ 466 * | M | data |E| data |E| data |E| data |E| 467 * +---+----------+-+----------+-+----------+-+----------+-+ 468 * 469 * The position of block mark moves forward in the ECC-based view 470 * of page, and the delta is: 471 * 472 * E * G * (N - 1) 473 * D = (---------------- + M) 474 * 8 475 * 476 * With the formula to compute the ECC strength, and the condition 477 * : C >= O (C is the ecc chunk size) 478 * 479 * It's easy to deduce to the following result: 480 * 481 * E * G (O - M) C - M C - M 482 * ----------- <= ------- <= -------- < --------- 483 * 8 N N (N - 1) 484 * 485 * So, we get: 486 * 487 * E * G * (N - 1) 488 * D = (---------------- + M) < C 489 * 8 490 * 491 * The above inequality means the position of block mark 492 * within the ECC-based view of the page is still in the data chunk, 493 * and it's NOT in the ECC bits of the chunk. 
494 * 495 * Use the following to compute the bit position of the 496 * physical block mark within the ECC-based view of the page: 497 * (page_size - D) * 8 498 * 499 * --Huang Shijie 500 */ 501 block_mark_bit_offset = mtd->writesize * 8 - 502 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) 503 + geo->metadata_size * 8); 504 505 geo->block_mark_byte_offset = block_mark_bit_offset / 8; 506 geo->block_mark_bit_offset = block_mark_bit_offset % 8; 507 return 0; 508 } 509 510 static int common_nfc_set_geometry(struct gpmi_nand_data *this) 511 { 512 struct nand_chip *chip = &this->nand; 513 514 if (chip->ecc.strength > 0 && chip->ecc.size > 0) 515 return set_geometry_by_ecc_info(this, chip->ecc.strength, 516 chip->ecc.size); 517 518 if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")) 519 || legacy_set_geometry(this)) { 520 if (!(chip->base.eccreq.strength > 0 && 521 chip->base.eccreq.step_size > 0)) 522 return -EINVAL; 523 524 return set_geometry_by_ecc_info(this, 525 chip->base.eccreq.strength, 526 chip->base.eccreq.step_size); 527 } 528 529 return 0; 530 } 531 532 /* Configures the geometry for BCH. */ 533 static int bch_set_geometry(struct gpmi_nand_data *this) 534 { 535 struct resources *r = &this->resources; 536 int ret; 537 538 ret = common_nfc_set_geometry(this); 539 if (ret) 540 return ret; 541 542 ret = pm_runtime_get_sync(this->dev); 543 if (ret < 0) 544 return ret; 545 546 /* 547 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this 548 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. 549 * and MX28. 550 */ 551 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this)); 552 if (ret) 553 goto err_out; 554 555 /* Set *all* chip selects to use layout 0. 
 */
        writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);

        ret = 0;
err_out:
        pm_runtime_mark_last_busy(this->dev);
        pm_runtime_put_autosuspend(this->dev);

        return ret;
}

/*
 * <1> Firstly, we should know what's the GPMI-clock means.
 *     The GPMI-clock is the internal clock in the gpmi nand controller.
 *     If you set 100MHz to gpmi nand controller, the GPMI-clock's period
 *     is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
 *
 * <2> Secondly, we should know what's the frequency on the nand chip pins.
 *     The frequency on the nand chip pins is derived from the GPMI-clock.
 *     We can get it from the following equation:
 *
 *         F = G / (DS + DH)
 *
 *         F  : the frequency on the nand chip pins.
 *         G  : the GPMI clock, such as 100MHz.
 *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
 *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
 *
 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
 *     the nand EDO(extended Data Out) timing could be applied.
 *     The GPMI implements a feedback read strobe to sample the read data.
 *     The feedback read strobe can be delayed to support the nand EDO timing
 *     where the read strobe may deassert before the read data is valid, and
 *     read data is valid for some time after read strobe.
 *
 *     The following figure illustrates some aspects of a NAND Flash read:
 *
 *                   |<---tREA---->|
 *                   |             |
 *                   |         |   |
 *                   |<--tRP-->|   |
 *                   |         |   |
 *                  __          ___|__________________________________
 *     RDN            \________/   |
 *                                 |
 *                                 /---------\
 *     Read Data    --------------<           >---------
 *                                 \---------/
 *                                |     |
 *                                |<-D->|
 *     FeedbackRDN  ________             ____________
 *                          \___________/
 *
 *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
 *
 *
 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
 *
 *  4.1) From the aspect of the nand chip pins:
 *        Delay = (tREA + C - tRP)               {1}
 *
 *        tREA : the maximum read access time.
 *        C    : a constant to adjust the delay. default is 4000ps.
 *        tRP  : the read pulse width, which is exactly:
 *                   tRP = (GPMI-clock-period) * DATA_SETUP
 *
 *  4.2) From the aspect of the GPMI nand controller:
 *         Delay = RDN_DELAY * 0.125 * RP        {2}
 *
 *         RP   : the DLL reference period.
 *            if (GPMI-clock-period > DLL_THRESHOLD)
 *                   RP = GPMI-clock-period / 2;
 *            else
 *                   RP = GPMI-clock-period;
 *
 *            Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
 *            is greater than DLL_THRESHOLD. In other SOCs, the
 *            DLL_THRESHOLD is 16000ps, but in mx6q, we use 12000ps.
 *
 *  4.3) since {1} equals {2}, we get:
 *
 *                     (tREA + 4000 - tRP) * 8
 *         RDN_DELAY = -----------------------     {3}
 *                           RP
 */
static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
                                     const struct nand_sdr_timings *sdr)
{
        struct gpmi_nfc_hardware_timing *hw = &this->hw;
        unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
        unsigned int period_ps, reference_period_ps;
        unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
        unsigned int tRP_ps;
        bool use_half_period;
        int sample_delay_ps, sample_delay_factor;
        u16 busy_timeout_cycles;
        u8 wrn_dly_sel;

        /* Pick the GPMI clock rate and write-strobe delay from tRC. */
        if (sdr->tRC_min >= 30000) {
                /* ONFI non-EDO modes [0-3] */
                hw->clk_rate = 22000000;
                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
        } else if (sdr->tRC_min >= 25000) {
                /* ONFI EDO mode 4 */
                hw->clk_rate = 80000000;
                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
        } else {
                /* ONFI EDO mode 5 */
                hw->clk_rate = 100000000;
                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
        }

        /* SDR core timings are given in picoseconds */
        period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);

        addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
        data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
        data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
        busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);

        hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
                      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
                      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
        /* BUSY_TIMEOUT is expressed in units of 4096 GPMI cycles. */
        hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);

        /*
         * Derive NFC ideal delay from {3}:
         *
         *                     (tREA + 4000 - tRP) * 8
         *         RDN_DELAY = -----------------------
         *                                RP
         */
        if (period_ps > dll_threshold_ps) {
                use_half_period = true;
                reference_period_ps = period_ps / 2;
        } else {
                use_half_period = false;
                reference_period_ps = period_ps;
        }

        tRP_ps = data_setup_cycles * period_ps;
        sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
        if (sample_delay_ps > 0)
                sample_delay_factor = sample_delay_ps / reference_period_ps;
        else
                sample_delay_factor = 0;

        hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
        if (sample_delay_factor)
                hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
                              BM_GPMI_CTRL1_DLL_ENABLE |
                              (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}

/* Program the timings computed by gpmi_nfc_compute_timings() into the HW. */
static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
        struct gpmi_nfc_hardware_timing *hw = &this->hw;
        struct resources *r = &this->resources;
        void __iomem *gpmi_regs = r->gpmi_regs;
        unsigned int dll_wait_time_us;

        /* NOTE(review): clk_set_rate() failure is not checked here — confirm */
        clk_set_rate(r->clock[0], hw->clk_rate);

        writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
        writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

        /*
         * Clear several CTRL1 fields, DLL must be disabled when setting
         * RDN_DELAY or HALF_PERIOD.
724 */ 725 writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR); 726 writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET); 727 728 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */ 729 dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64; 730 if (!dll_wait_time_us) 731 dll_wait_time_us = 1; 732 733 /* Wait for the DLL to settle. */ 734 udelay(dll_wait_time_us); 735 } 736 737 static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, 738 const struct nand_data_interface *conf) 739 { 740 struct gpmi_nand_data *this = nand_get_controller_data(chip); 741 const struct nand_sdr_timings *sdr; 742 743 /* Retrieve required NAND timings */ 744 sdr = nand_get_sdr_timings(conf); 745 if (IS_ERR(sdr)) 746 return PTR_ERR(sdr); 747 748 /* Only MX6 GPMI controller can reach EDO timings */ 749 if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this)) 750 return -ENOTSUPP; 751 752 /* Stop here if this call was just a check */ 753 if (chipnr < 0) 754 return 0; 755 756 /* Do the actual derivation of the controller timings */ 757 gpmi_nfc_compute_timings(this, sdr); 758 759 this->hw.must_apply_timings = true; 760 761 return 0; 762 } 763 764 /* Clears a BCH interrupt. */ 765 static void gpmi_clear_bch(struct gpmi_nand_data *this) 766 { 767 struct resources *r = &this->resources; 768 writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR); 769 } 770 771 static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 772 { 773 /* We use the DMA channel 0 to access all the nand chips. */ 774 return this->dma_chans[0]; 775 } 776 777 /* This will be called after the DMA operation is finished. 
*/ 778 static void dma_irq_callback(void *param) 779 { 780 struct gpmi_nand_data *this = param; 781 struct completion *dma_c = &this->dma_done; 782 783 complete(dma_c); 784 } 785 786 static irqreturn_t bch_irq(int irq, void *cookie) 787 { 788 struct gpmi_nand_data *this = cookie; 789 790 gpmi_clear_bch(this); 791 complete(&this->bch_done); 792 return IRQ_HANDLED; 793 } 794 795 static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len) 796 { 797 /* 798 * raw_len is the length to read/write including bch data which 799 * we are passed in exec_op. Calculate the data length from it. 800 */ 801 if (this->bch) 802 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size); 803 else 804 return raw_len; 805 } 806 807 /* Can we use the upper's buffer directly for DMA? */ 808 static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, 809 int raw_len, struct scatterlist *sgl, 810 enum dma_data_direction dr) 811 { 812 int ret; 813 int len = gpmi_raw_len_to_len(this, raw_len); 814 815 /* first try to map the upper buffer directly */ 816 if (virt_addr_valid(buf) && !object_is_on_stack(buf)) { 817 sg_init_one(sgl, buf, len); 818 ret = dma_map_sg(this->dev, sgl, 1, dr); 819 if (ret == 0) 820 goto map_fail; 821 822 return true; 823 } 824 825 map_fail: 826 /* We have to use our own DMA buffer. 
 */
        sg_init_one(sgl, this->data_buffer_dma, len);

        /* For writes, stage the caller's data into the bounce buffer. */
        if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
                memcpy(this->data_buffer_dma, buf, len);

        dma_map_sg(this->dev, sgl, 1, dr);

        return false;
}

/**
 * gpmi_copy_bits - copy bits from one memory region to another
 * @dst: destination buffer
 * @dst_bit_off: bit offset we're starting to write at
 * @src: source buffer
 * @src_bit_off: bit offset we're starting to read from
 * @nbits: number of bits to copy
 *
 * This functions copies bits from one memory region to another, and is used by
 * the GPMI driver to copy ECC sections which are not guaranteed to be byte
 * aligned.
 *
 * src and dst should not overlap.
 *
 */
static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
                           size_t src_bit_off, size_t nbits)
{
        size_t i;
        size_t nbytes;
        /* staging area: holds up to 32 not-yet-written bits, LSB first */
        u32 src_buffer = 0;
        size_t bits_in_src_buffer = 0;

        if (!nbits)
                return;

        /*
         * Move src and dst pointers to the closest byte pointer and store bit
         * offsets within a byte.
         */
        src += src_bit_off / 8;
        src_bit_off %= 8;

        dst += dst_bit_off / 8;
        dst_bit_off %= 8;

        /*
         * Initialize the src_buffer value with bits available in the first
         * byte of data so that we end up with a byte aligned src pointer.
         */
        if (src_bit_off) {
                src_buffer = src[0] >> src_bit_off;
                if (nbits >= (8 - src_bit_off)) {
                        bits_in_src_buffer += 8 - src_bit_off;
                } else {
                        /* fewer bits requested than remain in this byte */
                        src_buffer &= GENMASK(nbits - 1, 0);
                        bits_in_src_buffer += nbits;
                }
                nbits -= bits_in_src_buffer;
                src++;
        }

        /* Calculate the number of bytes that can be copied from src to dst. */
        nbytes = nbits / 8;

        /* Try to align dst to a byte boundary. */
        if (dst_bit_off) {
                if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
                        src_buffer |= src[0] << bits_in_src_buffer;
                        bits_in_src_buffer += 8;
                        src++;
                        nbytes--;
                }

                if (bits_in_src_buffer >= (8 - dst_bit_off)) {
                        /* keep dst's low bits, splice in the new high bits */
                        dst[0] &= GENMASK(dst_bit_off - 1, 0);
                        dst[0] |= src_buffer << dst_bit_off;
                        src_buffer >>= (8 - dst_bit_off);
                        bits_in_src_buffer -= (8 - dst_bit_off);
                        dst_bit_off = 0;
                        dst++;
                        if (bits_in_src_buffer > 7) {
                                bits_in_src_buffer -= 8;
                                dst[0] = src_buffer;
                                dst++;
                                src_buffer >>= 8;
                        }
                }
        }

        if (!bits_in_src_buffer && !dst_bit_off) {
                /*
                 * Both src and dst pointers are byte aligned, thus we can
                 * just use the optimized memcpy function.
                 */
                if (nbytes)
                        memcpy(dst, src, nbytes);
        } else {
                /*
                 * src buffer is not byte aligned, hence we have to copy each
                 * src byte to the src_buffer variable before extracting a byte
                 * to store in dst.
                 */
                for (i = 0; i < nbytes; i++) {
                        src_buffer |= src[i] << bits_in_src_buffer;
                        dst[i] = src_buffer;
                        src_buffer >>= 8;
                }
        }
        /* Update dst and src pointers */
        dst += nbytes;
        src += nbytes;

        /*
         * nbits is the number of remaining bits. It should not exceed 8 as
         * we've already copied as much bytes as possible.
         */
        nbits %= 8;

        /*
         * If there's no more bits to copy to the destination and src buffer
         * was already byte aligned, then we're done.
         */
        if (!nbits && !bits_in_src_buffer)
                return;

        /* Copy the remaining bits to src_buffer */
        if (nbits)
                src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
                              bits_in_src_buffer;
        bits_in_src_buffer += nbits;

        /*
         * In case there were not enough bits to get a byte aligned dst buffer
         * prepare the src_buffer variable to match the dst organization (shift
         * src_buffer by dst_bit_off and retrieve the least significant bits
         * from dst).
         */
        if (dst_bit_off)
                src_buffer = (src_buffer << dst_bit_off) |
                             (*dst & GENMASK(dst_bit_off - 1, 0));
        bits_in_src_buffer += dst_bit_off;

        /*
         * Keep most significant bits from dst if we end up with an unaligned
         * number of bits.
         */
        nbytes = bits_in_src_buffer / 8;
        if (bits_in_src_buffer % 8) {
                src_buffer |= (dst[nbytes] &
                               GENMASK(7, bits_in_src_buffer % 8)) <<
                              (nbytes * 8);
                nbytes++;
        }

        /* Copy the remaining bytes to dst */
        for (i = 0; i < nbytes; i++) {
                dst[i] = src_buffer;
                src_buffer >>= 8;
        }
}

/* add our owner bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
        .options = 0,
        .offs = 0,
        .len = 1,
        .pattern = scan_ff_pattern
};

/*
 * We may change the layout if we can get the ECC info from the datasheet,
 * else we will use all the (page + OOB).
 */
static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
                              struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *geo = &this->bch_geometry;

        if (section)
                return -ERANGE;

        /* ECC occupies the whole oob area used by the BCH page. */
        oobregion->offset = 0;
        oobregion->length = geo->page_size - mtd->writesize;

        return 0;
}

static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
                               struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *geo = &this->bch_geometry;

        if (section)
                return -ERANGE;

        /* The available oob size we have.
*/ 1029 if (geo->page_size < mtd->writesize + mtd->oobsize) { 1030 oobregion->offset = geo->page_size - mtd->writesize; 1031 oobregion->length = mtd->oobsize - oobregion->offset; 1032 } 1033 1034 return 0; 1035 } 1036 1037 static const char * const gpmi_clks_for_mx2x[] = { 1038 "gpmi_io", 1039 }; 1040 1041 static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = { 1042 .ecc = gpmi_ooblayout_ecc, 1043 .free = gpmi_ooblayout_free, 1044 }; 1045 1046 static const struct gpmi_devdata gpmi_devdata_imx23 = { 1047 .type = IS_MX23, 1048 .bch_max_ecc_strength = 20, 1049 .max_chain_delay = 16000, 1050 .clks = gpmi_clks_for_mx2x, 1051 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), 1052 }; 1053 1054 static const struct gpmi_devdata gpmi_devdata_imx28 = { 1055 .type = IS_MX28, 1056 .bch_max_ecc_strength = 20, 1057 .max_chain_delay = 16000, 1058 .clks = gpmi_clks_for_mx2x, 1059 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), 1060 }; 1061 1062 static const char * const gpmi_clks_for_mx6[] = { 1063 "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", 1064 }; 1065 1066 static const struct gpmi_devdata gpmi_devdata_imx6q = { 1067 .type = IS_MX6Q, 1068 .bch_max_ecc_strength = 40, 1069 .max_chain_delay = 12000, 1070 .clks = gpmi_clks_for_mx6, 1071 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), 1072 }; 1073 1074 static const struct gpmi_devdata gpmi_devdata_imx6sx = { 1075 .type = IS_MX6SX, 1076 .bch_max_ecc_strength = 62, 1077 .max_chain_delay = 12000, 1078 .clks = gpmi_clks_for_mx6, 1079 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), 1080 }; 1081 1082 static const char * const gpmi_clks_for_mx7d[] = { 1083 "gpmi_io", "gpmi_bch_apb", 1084 }; 1085 1086 static const struct gpmi_devdata gpmi_devdata_imx7d = { 1087 .type = IS_MX7D, 1088 .bch_max_ecc_strength = 62, 1089 .max_chain_delay = 12000, 1090 .clks = gpmi_clks_for_mx7d, 1091 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d), 1092 }; 1093 1094 static int acquire_register_block(struct gpmi_nand_data *this, 1095 const char *res_name) 
{
	struct platform_device *pdev = this->pdev;
	struct resources *res = &this->resources;
	struct resource *r;
	void __iomem *p;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
	/* devm_ioremap_resource() copes with a NULL resource itself. */
	p = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(p))
		return PTR_ERR(p);

	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
		res->gpmi_regs = p;
	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
		res->bch_regs = p;
	else
		dev_err(this->dev, "unknown resource name : %s\n", res_name);

	return 0;
}

/* Request the BCH interrupt and attach irq_h to it (devm-managed). */
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	struct resource *r;
	int err;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
	if (!r) {
		dev_err(this->dev, "Can't get resource for %s\n", res_name);
		return -ENODEV;
	}

	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
	if (err)
		dev_err(this->dev, "error requesting BCH IRQ\n");

	return err;
}

/* Release every DMA channel we hold; safe to call on a partial setup. */
static void release_dma_channels(struct gpmi_nand_data *this)
{
	unsigned int i;
	for (i = 0; i < DMA_CHANS; i++)
		if (this->dma_chans[i]) {
			dma_release_channel(this->dma_chans[i]);
			this->dma_chans[i] = NULL;
		}
}

/* Acquire the single "rx-tx" DMA channel used for all transfers. */
static int acquire_dma_channels(struct gpmi_nand_data *this)
{
	struct platform_device *pdev = this->pdev;
	struct dma_chan *dma_chan;
	int ret = 0;

	/* request dma channel */
	dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		/* Probe deferral is expected; only log real failures. */
		if (ret != -EPROBE_DEFER)
			dev_err(this->dev, "DMA channel request failed: %d\n",
				ret);
		release_dma_channels(this);
	} else {
		this->dma_chans[0] = dma_chan;
	}

	return ret;
}

/* Look up (devm) every clock listed in the per-SoC devdata table. */
static int gpmi_get_clks(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct clk *clk;
	int err, i;

	for (i = 0; i < this->devdata->clks_count; i++) {
		clk = devm_clk_get(this->dev, this->devdata->clks[i]);
		if (IS_ERR(clk)) {
			err = PTR_ERR(clk);
			goto err_clock;
		}

		r->clock[i] = clk;
	}

	if (GPMI_IS_MX6(this))
		/*
		 * Set the default value for the gpmi clock.
		 *
		 * If you want to use the ONFI nand which is in the
		 * Synchronous Mode, you should change the clock as you need.
		 */
		clk_set_rate(r->clock[0], 22000000);

	return 0;

err_clock:
	dev_dbg(this->dev, "failed in finding the clocks.\n");
	return err;
}

/* Grab register blocks, BCH IRQ, DMA channel and clocks, in that order. */
static int acquire_resources(struct gpmi_nand_data *this)
{
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_regs;

	ret = gpmi_get_clks(this);
	if (ret)
		goto exit_clock;
	return 0;

exit_clock:
	release_dma_channels(this);
exit_regs:
	return ret;
}

static void release_resources(struct gpmi_nand_data *this)
{
	/* Registers, IRQ and clocks are devm-managed; only DMA needs care. */
	release_dma_channels(this);
}

static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct bch_geometry *geo = &this->bch_geometry;

	if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
		dma_free_coherent(dev, geo->auxiliary_size,
				  this->auxiliary_virt,
				  this->auxiliary_phys);
	kfree(this->data_buffer_dma);
	kfree(this->raw_buffer);

	this->data_buffer_dma = NULL;
	this->raw_buffer = NULL;
}

/* Allocate the DMA buffers */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);

	/*
	 * [2] Allocate a read/write data buffer.
	 * The gpmi_alloc_dma_buffer can be called twice.
	 * We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
	 * is called before the NAND identification; and we allocate a
	 * buffer of the real NAND page size when the gpmi_alloc_dma_buffer
	 * is called after.
	 */
	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
					GFP_DMA | GFP_KERNEL);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
						  &this->auxiliary_phys, GFP_DMA);
	if (!this->auxiliary_virt)
		goto error_alloc;

	this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
	if (!this->raw_buffer)
		goto error_alloc;

	return 0;

error_alloc:
	/* Frees whatever subset was successfully allocated above. */
	gpmi_free_dma_buffer(this);
	return -ENOMEM;
}

/*
 * Handles block mark swapping.
 * It can be called in swapping the block mark, or swapping it back,
 * because the operations are the same.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
				void *payload, void *auxiliary)
{
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	unsigned char *p;
	unsigned char *a;
	unsigned int bit;
	unsigned char mask;
	unsigned char from_data;
	unsigned char from_oob;

	if (!this->swap_block_mark)
		return;

	/*
	 * If control arrives here, we're swapping. Make some convenience
	 * variables.
	 */
	bit = nfc_geo->block_mark_bit_offset;
	p = payload + nfc_geo->block_mark_byte_offset;
	a = auxiliary;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));

	/* Get the byte from the OOB. */
	from_oob = a[0];

	/* Swap them. */
	a[0] = from_data;

	/* Keep the low `bit` payload bits of p[0]; splice the mark above. */
	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}

/*
 * Walk the per-chunk BCH status bytes for chunks [first, last) of a page
 * just read, update mtd->ecc_stats, and return the worst bitflip count.
 */
static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
			       int last, int meta)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i;
	unsigned char *status;
	unsigned int max_bitflips = 0;

	/* Loop over status bytes, accumulating ECC status.
 */
	status = this->auxiliary_virt + ALIGN(meta, 4);

	for (i = first; i < last; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
			u8 *eccbuf = this->raw_buffer;
			int offset, bitoffset;
			int eccbytes;
			int flips;

			/* Read ECC bytes into our internal raw_buffer */
			offset = nfc_geo->metadata_size * 8;
			offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
			offset -= eccbits;
			bitoffset = offset % 8;
			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
			offset /= 8;
			eccbytes -= offset;
			nand_change_read_column_op(chip, offset, eccbuf,
						   eccbytes, false);

			/*
			 * ECC data are not byte aligned and we may have
			 * in-band data in the first and last byte of
			 * eccbuf. Set non-eccbits to one so that
			 * nand_check_erased_ecc_chunk() does not count them
			 * as bitflips.
			 */
			if (bitoffset)
				eccbuf[0] |= GENMASK(bitoffset - 1, 0);

			bitoffset = (bitoffset + eccbits) % 8;
			if (bitoffset)
				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

			/*
			 * The ECC hardware has an uncorrectable ECC status
			 * code in case we have bitflips in an erased page. As
			 * nothing was written into this subpage the ECC is
			 * obviously wrong and we can not trust it. We assume
			 * at this point that we are reading an erased page and
			 * try to correct the bitflips in buffer up to
			 * ecc_strength bitflips. If this is a page with random
			 * data, we exceed this number of bitflips and have a
			 * ECC failure. Otherwise we use the corrected buffer.
			 */
			if (i == 0) {
				/* The first block includes metadata */
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->ecc_chunk_size,
						nfc_geo->ecc_chunk_size,
						eccbuf, eccbytes,
						this->auxiliary_virt,
						nfc_geo->metadata_size,
						nfc_geo->ecc_strength);
			} else {
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->ecc_chunk_size,
						nfc_geo->ecc_chunk_size,
						eccbuf, eccbytes,
						NULL, 0,
						nfc_geo->ecc_strength);
			}

			if (flips > 0) {
				max_bitflips = max_t(unsigned int, max_bitflips,
						     flips);
				mtd->ecc_stats.corrected += flips;
				continue;
			}

			mtd->ecc_stats.failed++;
			continue;
		}

		/* Correctable chunk: the status byte is the flip count. */
		mtd->ecc_stats.corrected += *status;
		max_bitflips = max_t(unsigned int, max_bitflips, *status);
	}

	return max_bitflips;
}

/*
 * Compute the BCH FLASH0LAYOUT0/1 register values for a standard,
 * full-page transfer. Note the layout registers take half the ECC
 * strength (the ECC0/ECCN field encoding used throughout this driver).
 */
static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned int ecc_strength = geo->ecc_strength >> 1;
	unsigned int gf_len = geo->gf_len;
	unsigned int block_size = geo->ecc_chunk_size;

	this->bch_flashlayout0 =
		BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
		BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);

	this->bch_flashlayout1 =
		BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
}

/* Read a full page with hardware BCH correction. */
static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned
int max_bitflips; 1460 int ret; 1461 1462 gpmi_bch_layout_std(this); 1463 this->bch = true; 1464 1465 ret = nand_read_page_op(chip, page, 0, buf, geo->page_size); 1466 if (ret) 1467 return ret; 1468 1469 max_bitflips = gpmi_count_bitflips(chip, buf, 0, 1470 geo->ecc_chunk_count, 1471 geo->auxiliary_status_offset); 1472 1473 /* handle the block mark swapping */ 1474 block_mark_swapping(this, buf, this->auxiliary_virt); 1475 1476 if (oob_required) { 1477 /* 1478 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() 1479 * for details about our policy for delivering the OOB. 1480 * 1481 * We fill the caller's buffer with set bits, and then copy the 1482 * block mark to th caller's buffer. Note that, if block mark 1483 * swapping was necessary, it has already been done, so we can 1484 * rely on the first byte of the auxiliary buffer to contain 1485 * the block mark. 1486 */ 1487 memset(chip->oob_poi, ~0, mtd->oobsize); 1488 chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0]; 1489 } 1490 1491 return max_bitflips; 1492 } 1493 1494 /* Fake a virtual small page for the subpage read */ 1495 static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs, 1496 uint32_t len, uint8_t *buf, int page) 1497 { 1498 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1499 struct bch_geometry *geo = &this->bch_geometry; 1500 int size = chip->ecc.size; /* ECC chunk size */ 1501 int meta, n, page_size; 1502 unsigned int max_bitflips; 1503 unsigned int ecc_strength; 1504 int first, last, marker_pos; 1505 int ecc_parity_size; 1506 int col = 0; 1507 int ret; 1508 1509 /* The size of ECC parity */ 1510 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8; 1511 1512 /* Align it with the chunk size */ 1513 first = offs / size; 1514 last = (offs + len - 1) / size; 1515 1516 if (this->swap_block_mark) { 1517 /* 1518 * Find the chunk which contains the Block Marker. 1519 * If this chunk is in the range of [first, last], 1520 * we have to read out the whole page. 
1521 * Why? since we had swapped the data at the position of Block 1522 * Marker to the metadata which is bound with the chunk 0. 1523 */ 1524 marker_pos = geo->block_mark_byte_offset / size; 1525 if (last >= marker_pos && first <= marker_pos) { 1526 dev_dbg(this->dev, 1527 "page:%d, first:%d, last:%d, marker at:%d\n", 1528 page, first, last, marker_pos); 1529 return gpmi_ecc_read_page(chip, buf, 0, page); 1530 } 1531 } 1532 1533 meta = geo->metadata_size; 1534 if (first) { 1535 col = meta + (size + ecc_parity_size) * first; 1536 meta = 0; 1537 buf = buf + first * size; 1538 } 1539 1540 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8; 1541 1542 n = last - first + 1; 1543 page_size = meta + (size + ecc_parity_size) * n; 1544 ecc_strength = geo->ecc_strength >> 1; 1545 1546 this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) | 1547 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) | 1548 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) | 1549 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) | 1550 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this); 1551 1552 this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) | 1553 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) | 1554 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) | 1555 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this); 1556 1557 this->bch = true; 1558 1559 ret = nand_read_page_op(chip, page, col, buf, page_size); 1560 if (ret) 1561 return ret; 1562 1563 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n", 1564 page, offs, len, col, first, n, page_size); 1565 1566 max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta); 1567 1568 return max_bitflips; 1569 } 1570 1571 static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf, 1572 int oob_required, int page) 1573 { 1574 struct mtd_info *mtd = nand_to_mtd(chip); 1575 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1576 struct bch_geometry *nfc_geo = &this->bch_geometry; 1577 int ret; 1578 
	dev_dbg(this->dev, "ecc write page.\n");

	gpmi_bch_layout_std(this);
	this->bch = true;

	/* The caller-staged OOB becomes the BCH auxiliary (metadata) area. */
	memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);

	if (this->swap_block_mark) {
		/*
		 * When doing bad block marker swapping we must always copy the
		 * input buffer as we can't modify the const buffer.
		 */
		memcpy(this->data_buffer_dma, buf, mtd->writesize);
		buf = this->data_buffer_dma;
		block_mark_swapping(this, this->data_buffer_dma,
				    this->auxiliary_virt);
	}

	ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 *
 * What we do for this specific read operation depends on two questions:
 *
 * 1) Are we doing a "raw" read, or an ECC-based read?
 *
 * 2) Are we using block mark swapping or transcription?
 *
 * There are four cases, illustrated by the following Karnaugh map:
 *
 *                    |           Raw           |        ECC-based        |
 *       -------------+-------------------------+-------------------------+
 *                    | Read the conventional   |                         |
 *                    | OOB at the end of the   |                         |
 *       Swapping     | page and return it. It  |                         |
 *                    | contains exactly what   |                         |
 *                    | we want.                | Read the block mark and |
 *       -------------+-------------------------+ return it in a buffer   |
 *                    | Read the conventional   | full of set bits.       |
 *                    | OOB at the end of the   |                         |
 *                    | page and also the block |                         |
 *       Transcribing | mark in the metadata.   |                         |
 *                    | Copy the block mark     |                         |
 *                    | into the first byte of  |                         |
 *                    | the OOB.                |                         |
 *       -------------+-------------------------+-------------------------+
 *
 * Note that we break rule #4 in the Transcribing/Raw case because we're not
 * giving an accurate view of the actual, physical bytes in the page (we're
 * overwriting the block mark). That's OK because it's more important to follow
 * rule #2.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 */
static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret;

	/* clear the OOB buffer */
	memset(chip->oob_poi, ~0, mtd->oobsize);

	/* Read out the conventional OOB.
 */
	ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
				mtd->oobsize);
	if (ret)
		return ret;

	/*
	 * Now, we want to make sure the block mark is correct. In the
	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
	 * Otherwise, we need to explicitly read it.
	 */
	if (GPMI_IS_MX23(this)) {
		/* Read the block mark into the first byte of the OOB buffer. */
		ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_region of = { };

	/* Do we have available oob area? */
	mtd_ooblayout_free(mtd, 0, &of);
	if (!of.length)
		return -EPERM;

	/* OOB writes are only allowed on SLC NAND. */
	if (!nand_is_slc(chip))
		return -EPERM;

	return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
				 chip->oob_poi + of.offset, of.length);
}

/*
 * This function reads a NAND page without involving the ECC engine (no HW
 * ECC correction).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and do not align data chunk on
 * byte boundaries.
 * We thus need to take care moving the payload data and ECC bits stored in the
 * page into the provided buffers, which is why we're using gpmi_copy_bits.
 *
 * See set_geometry_by_ecc_info inline comments to have a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	size_t src_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	uint8_t *oob = chip->oob_poi;
	int step;
	int ret;

	ret = nand_read_page_op(chip, page, 0, tmp_buf,
				mtd->writesize + mtd->oobsize);
	if (ret)
		return ret;

	/*
	 * If required, swap the bad block marker and the data stored in the
	 * metadata section, so that we don't wrongly consider a block as bad.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	/*
	 * Copy the metadata section into the oob buffer (this section is
	 * guaranteed to be aligned on a byte boundary).
 */
	if (oob_required)
		memcpy(oob, tmp_buf, nfc_geo->metadata_size);

	oob_bit_off = nfc_geo->metadata_size * 8;
	src_bit_off = oob_bit_off;

	/* Extract interleaved payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(buf, step * eccsize * 8,
				       tmp_buf, src_bit_off,
				       eccsize * 8);
		src_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(oob, oob_bit_off,
				       tmp_buf, src_bit_off,
				       eccbits);

		src_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	if (oob_required) {
		oob_byte_off = oob_bit_off / 8;

		/* Copy the remaining, byte-aligned OOB bytes verbatim. */
		if (oob_byte_off < mtd->oobsize)
			memcpy(oob + oob_byte_off,
			       tmp_buf + mtd->writesize + oob_byte_off,
			       mtd->oobsize - oob_byte_off);
	}

	return 0;
}

/*
 * This function writes a NAND page without involving the ECC engine (no HW
 * ECC generation).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and do not align data chunk on
 * byte boundaries.
 * We thus need to take care moving the OOB area at the right place in the
 * final page, which is why we're using gpmi_copy_bits.
 *
 * See set_geometry_by_ecc_info inline comments to have a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	uint8_t *oob = chip->oob_poi;
	size_t dst_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	int step;

	/*
	 * Initialize all bits to 1 in case we don't have a buffer for the
	 * payload or oob data in order to leave unspecified bits of data
	 * to their initial state.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);

	/*
	 * First copy the metadata section (stored in oob buffer) at the
	 * beginning of the page, as imposed by the GPMI layout.
	 */
	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
	oob_bit_off = nfc_geo->metadata_size * 8;
	dst_bit_off = oob_bit_off;

	/* Interleave payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       buf, step * eccsize * 8, eccsize * 8);
		dst_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       oob, oob_bit_off, eccbits);

		dst_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	if (oob_required && oob_byte_off < mtd->oobsize)
		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);

	/*
	 * If required, swap
the bad block marker and the first byte of the
	 * metadata section, so that we don't modify the bad block marker.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	return nand_prog_page_op(chip, page, 0, tmp_buf,
				 mtd->writesize + mtd->oobsize);
}

/* Raw OOB read: a raw page read that only fills the OOB buffer. */
static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
}

/* Raw OOB write: a raw page write with an all-0xff payload. */
static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
}

static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret = 0;
	uint8_t *block_mark;
	int column, page, chipnr;

	chipnr = (int)(ofs >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/*
	 * On MX23 (transcription) the mark goes at the start of the page;
	 * everywhere else it goes to the conventional OOB location.
	 */
	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;

	/* Write the block mark. */
	block_mark = this->data_buffer_dma;
	block_mark[0] = 0; /* bad block marker */

	/* Shift to get page */
	page = (int)(ofs >> chip->page_shift);

	ret = nand_prog_page_op(chip, page, column, block_mark, 1);

	nand_deselect_target(chip);

	return ret;
}

static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *geometry = &this->rom_geometry;

	/*
	 * Set the boot block stride size.
	 *
	 * In principle, we should be reading this from the OTP bits, since
	 * that's where the ROM is going to get it. In fact, we don't have any
	 * way to read the OTP bits, so we go with the default and hope for the
	 * best.
	 */
	geometry->stride_size_in_pages = 64;

	/*
	 * Set the search area stride exponent.
	 *
	 * In principle, we should be reading this from the OTP bits, since
	 * that's where the ROM is going to get it. In fact, we don't have any
	 * way to read the OTP bits, so we go with the default and hope for the
	 * best.
	 */
	geometry->search_area_stride_exponent = 2;
	return 0;
}

static const char *fingerprint = "STMP";

/* Returns true iff the boot ROM "STMP" fingerprint is found on the chip. */
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int found_an_ncb_fingerprint = false;
	int ret;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	nand_select_target(chip, 0);

	/*
	 * Loop through the first search area, looking for the NCB fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read the NCB fingerprint. The fingerprint is four bytes long
		 * and starts in the 12th byte of the page.
		 */
		ret = nand_read_page_op(chip, page, 12, buffer,
					strlen(fingerprint));
		if (ret)
			continue;

		/* Look for the fingerprint.
 */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	nand_deselect_target(chip);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}

/* Writes a transcription stamp. */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round up to whole erase blocks. */
	search_area_size_in_blocks =
		(search_area_size_in_pages + (block_size_in_pages - 1)) /
					block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);

	nand_select_target(chip, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		status = nand_erase_op(chip, block);
		if (status)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);

		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
		if (status)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	nand_deselect_target(chip);

	return 0;
}

static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int chipnr;
	int page;
	loff_t byte;
	uint8_t block_mark;
	int ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium.
*/ 2093 block_count = nanddev_eraseblocks_per_target(&chip->base); 2094 2095 /* 2096 * Loop over all the blocks in the medium, transcribing block marks as 2097 * we go. 2098 */ 2099 for (block = 0; block < block_count; block++) { 2100 /* 2101 * Compute the chip, page and byte addresses for this block's 2102 * conventional mark. 2103 */ 2104 chipnr = block >> (chip->chip_shift - chip->phys_erase_shift); 2105 page = block << (chip->phys_erase_shift - chip->page_shift); 2106 byte = block << chip->phys_erase_shift; 2107 2108 /* Send the command to read the conventional block mark. */ 2109 nand_select_target(chip, chipnr); 2110 ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark, 2111 1); 2112 nand_deselect_target(chip); 2113 2114 if (ret) 2115 continue; 2116 2117 /* 2118 * Check if the block is marked bad. If so, we need to mark it 2119 * again, but this time the result will be a mark in the 2120 * location where we transcribe block marks. 2121 */ 2122 if (block_mark != 0xff) { 2123 dev_dbg(dev, "Transcribing mark in block %u\n", block); 2124 ret = chip->legacy.block_markbad(chip, byte); 2125 if (ret) 2126 dev_err(dev, 2127 "Failed to mark block bad with ret %d\n", 2128 ret); 2129 } 2130 } 2131 2132 /* Write the stamp that indicates we've transcribed the block marks. */ 2133 mx23_write_transcription_stamp(this); 2134 return 0; 2135 } 2136 2137 static int nand_boot_init(struct gpmi_nand_data *this) 2138 { 2139 nand_boot_set_geometry(this); 2140 2141 /* This is ROM arch-specific initilization before the BBT scanning. */ 2142 if (GPMI_IS_MX23(this)) 2143 return mx23_boot_init(this); 2144 return 0; 2145 } 2146 2147 static int gpmi_set_geometry(struct gpmi_nand_data *this) 2148 { 2149 int ret; 2150 2151 /* Free the temporary DMA memory for reading ID. */ 2152 gpmi_free_dma_buffer(this); 2153 2154 /* Set up the NFC geometry which is used by BCH. 
*/ 2155 ret = bch_set_geometry(this); 2156 if (ret) { 2157 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret); 2158 return ret; 2159 } 2160 2161 /* Alloc the new DMA buffers according to the pagesize and oobsize */ 2162 return gpmi_alloc_dma_buffer(this); 2163 } 2164 2165 static int gpmi_init_last(struct gpmi_nand_data *this) 2166 { 2167 struct nand_chip *chip = &this->nand; 2168 struct mtd_info *mtd = nand_to_mtd(chip); 2169 struct nand_ecc_ctrl *ecc = &chip->ecc; 2170 struct bch_geometry *bch_geo = &this->bch_geometry; 2171 int ret; 2172 2173 /* Set up the medium geometry */ 2174 ret = gpmi_set_geometry(this); 2175 if (ret) 2176 return ret; 2177 2178 /* Init the nand_ecc_ctrl{} */ 2179 ecc->read_page = gpmi_ecc_read_page; 2180 ecc->write_page = gpmi_ecc_write_page; 2181 ecc->read_oob = gpmi_ecc_read_oob; 2182 ecc->write_oob = gpmi_ecc_write_oob; 2183 ecc->read_page_raw = gpmi_ecc_read_page_raw; 2184 ecc->write_page_raw = gpmi_ecc_write_page_raw; 2185 ecc->read_oob_raw = gpmi_ecc_read_oob_raw; 2186 ecc->write_oob_raw = gpmi_ecc_write_oob_raw; 2187 ecc->mode = NAND_ECC_HW; 2188 ecc->size = bch_geo->ecc_chunk_size; 2189 ecc->strength = bch_geo->ecc_strength; 2190 mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops); 2191 2192 /* 2193 * We only enable the subpage read when: 2194 * (1) the chip is imx6, and 2195 * (2) the size of the ECC parity is byte aligned. 
2196 */ 2197 if (GPMI_IS_MX6(this) && 2198 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) { 2199 ecc->read_subpage = gpmi_ecc_read_subpage; 2200 chip->options |= NAND_SUBPAGE_READ; 2201 } 2202 2203 return 0; 2204 } 2205 2206 static int gpmi_nand_attach_chip(struct nand_chip *chip) 2207 { 2208 struct gpmi_nand_data *this = nand_get_controller_data(chip); 2209 int ret; 2210 2211 if (chip->bbt_options & NAND_BBT_USE_FLASH) { 2212 chip->bbt_options |= NAND_BBT_NO_OOB; 2213 2214 if (of_property_read_bool(this->dev->of_node, 2215 "fsl,no-blockmark-swap")) 2216 this->swap_block_mark = false; 2217 } 2218 dev_dbg(this->dev, "Blockmark swapping %sabled\n", 2219 this->swap_block_mark ? "en" : "dis"); 2220 2221 ret = gpmi_init_last(this); 2222 if (ret) 2223 return ret; 2224 2225 chip->options |= NAND_SKIP_BBTSCAN; 2226 2227 return 0; 2228 } 2229 2230 static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this) 2231 { 2232 struct gpmi_transfer *transfer = &this->transfers[this->ntransfers]; 2233 2234 this->ntransfers++; 2235 2236 if (this->ntransfers == GPMI_MAX_TRANSFERS) 2237 return NULL; 2238 2239 return transfer; 2240 } 2241 2242 static struct dma_async_tx_descriptor *gpmi_chain_command( 2243 struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr) 2244 { 2245 struct dma_chan *channel = get_dma_chan(this); 2246 struct dma_async_tx_descriptor *desc; 2247 struct gpmi_transfer *transfer; 2248 int chip = this->nand.cur_cs; 2249 u32 pio[3]; 2250 2251 /* [1] send out the PIO words */ 2252 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) 2253 | BM_GPMI_CTRL0_WORD_LENGTH 2254 | BF_GPMI_CTRL0_CS(chip, this) 2255 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 2256 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) 2257 | BM_GPMI_CTRL0_ADDRESS_INCREMENT 2258 | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1); 2259 pio[1] = 0; 2260 pio[2] = 0; 2261 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), 2262 DMA_TRANS_NONE, 0); 2263 if 
(!desc) 2264 return NULL; 2265 2266 transfer = get_next_transfer(this); 2267 if (!transfer) 2268 return NULL; 2269 2270 transfer->cmdbuf[0] = cmd; 2271 if (naddr) 2272 memcpy(&transfer->cmdbuf[1], addr, naddr); 2273 2274 sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1); 2275 dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE); 2276 2277 transfer->direction = DMA_TO_DEVICE; 2278 2279 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV, 2280 MXS_DMA_CTRL_WAIT4END); 2281 return desc; 2282 } 2283 2284 static struct dma_async_tx_descriptor *gpmi_chain_wait_ready( 2285 struct gpmi_nand_data *this) 2286 { 2287 struct dma_chan *channel = get_dma_chan(this); 2288 u32 pio[2]; 2289 2290 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY) 2291 | BM_GPMI_CTRL0_WORD_LENGTH 2292 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this) 2293 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 2294 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) 2295 | BF_GPMI_CTRL0_XFER_COUNT(0); 2296 pio[1] = 0; 2297 2298 return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE, 2299 MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY); 2300 } 2301 2302 static struct dma_async_tx_descriptor *gpmi_chain_data_read( 2303 struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct) 2304 { 2305 struct dma_async_tx_descriptor *desc; 2306 struct dma_chan *channel = get_dma_chan(this); 2307 struct gpmi_transfer *transfer; 2308 u32 pio[6] = {}; 2309 2310 transfer = get_next_transfer(this); 2311 if (!transfer) 2312 return NULL; 2313 2314 transfer->direction = DMA_FROM_DEVICE; 2315 2316 *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl, 2317 DMA_FROM_DEVICE); 2318 2319 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ) 2320 | BM_GPMI_CTRL0_WORD_LENGTH 2321 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this) 2322 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 2323 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) 2324 | 
BF_GPMI_CTRL0_XFER_COUNT(raw_len); 2325 2326 if (this->bch) { 2327 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC 2328 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE) 2329 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 2330 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY); 2331 pio[3] = raw_len; 2332 pio[4] = transfer->sgl.dma_address; 2333 pio[5] = this->auxiliary_phys; 2334 } 2335 2336 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), 2337 DMA_TRANS_NONE, 0); 2338 if (!desc) 2339 return NULL; 2340 2341 if (!this->bch) 2342 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, 2343 DMA_DEV_TO_MEM, 2344 MXS_DMA_CTRL_WAIT4END); 2345 2346 return desc; 2347 } 2348 2349 static struct dma_async_tx_descriptor *gpmi_chain_data_write( 2350 struct gpmi_nand_data *this, const void *buf, int raw_len) 2351 { 2352 struct dma_chan *channel = get_dma_chan(this); 2353 struct dma_async_tx_descriptor *desc; 2354 struct gpmi_transfer *transfer; 2355 u32 pio[6] = {}; 2356 2357 transfer = get_next_transfer(this); 2358 if (!transfer) 2359 return NULL; 2360 2361 transfer->direction = DMA_TO_DEVICE; 2362 2363 prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE); 2364 2365 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) 2366 | BM_GPMI_CTRL0_WORD_LENGTH 2367 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this) 2368 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 2369 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) 2370 | BF_GPMI_CTRL0_XFER_COUNT(raw_len); 2371 2372 if (this->bch) { 2373 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC 2374 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE) 2375 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE | 2376 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY); 2377 pio[3] = raw_len; 2378 pio[4] = transfer->sgl.dma_address; 2379 pio[5] = this->auxiliary_phys; 2380 } 2381 2382 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), 2383 DMA_TRANS_NONE, 2384 
(this->bch ? MXS_DMA_CTRL_WAIT4END : 0)); 2385 if (!desc) 2386 return NULL; 2387 2388 if (!this->bch) 2389 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, 2390 DMA_MEM_TO_DEV, 2391 MXS_DMA_CTRL_WAIT4END); 2392 2393 return desc; 2394 } 2395 2396 static int gpmi_nfc_exec_op(struct nand_chip *chip, 2397 const struct nand_operation *op, 2398 bool check_only) 2399 { 2400 const struct nand_op_instr *instr; 2401 struct gpmi_nand_data *this = nand_get_controller_data(chip); 2402 struct dma_async_tx_descriptor *desc = NULL; 2403 int i, ret, buf_len = 0, nbufs = 0; 2404 u8 cmd = 0; 2405 void *buf_read = NULL; 2406 const void *buf_write = NULL; 2407 bool direct = false; 2408 struct completion *completion; 2409 unsigned long to; 2410 2411 this->ntransfers = 0; 2412 for (i = 0; i < GPMI_MAX_TRANSFERS; i++) 2413 this->transfers[i].direction = DMA_NONE; 2414 2415 ret = pm_runtime_get_sync(this->dev); 2416 if (ret < 0) 2417 return ret; 2418 2419 /* 2420 * This driver currently supports only one NAND chip. Plus, dies share 2421 * the same configuration. So once timings have been applied on the 2422 * controller side, they will not change anymore. When the time will 2423 * come, the check on must_apply_timings will have to be dropped. 
2424 */ 2425 if (this->hw.must_apply_timings) { 2426 this->hw.must_apply_timings = false; 2427 gpmi_nfc_apply_timings(this); 2428 } 2429 2430 dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); 2431 2432 for (i = 0; i < op->ninstrs; i++) { 2433 instr = &op->instrs[i]; 2434 2435 nand_op_trace(" ", instr); 2436 2437 switch (instr->type) { 2438 case NAND_OP_WAITRDY_INSTR: 2439 desc = gpmi_chain_wait_ready(this); 2440 break; 2441 case NAND_OP_CMD_INSTR: 2442 cmd = instr->ctx.cmd.opcode; 2443 2444 /* 2445 * When this command has an address cycle chain it 2446 * together with the address cycle 2447 */ 2448 if (i + 1 != op->ninstrs && 2449 op->instrs[i + 1].type == NAND_OP_ADDR_INSTR) 2450 continue; 2451 2452 desc = gpmi_chain_command(this, cmd, NULL, 0); 2453 2454 break; 2455 case NAND_OP_ADDR_INSTR: 2456 desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs, 2457 instr->ctx.addr.naddrs); 2458 break; 2459 case NAND_OP_DATA_OUT_INSTR: 2460 buf_write = instr->ctx.data.buf.out; 2461 buf_len = instr->ctx.data.len; 2462 nbufs++; 2463 2464 desc = gpmi_chain_data_write(this, buf_write, buf_len); 2465 2466 break; 2467 case NAND_OP_DATA_IN_INSTR: 2468 if (!instr->ctx.data.len) 2469 break; 2470 buf_read = instr->ctx.data.buf.in; 2471 buf_len = instr->ctx.data.len; 2472 nbufs++; 2473 2474 desc = gpmi_chain_data_read(this, buf_read, buf_len, 2475 &direct); 2476 break; 2477 } 2478 2479 if (!desc) { 2480 ret = -ENXIO; 2481 goto unmap; 2482 } 2483 } 2484 2485 dev_dbg(this->dev, "%s setup done\n", __func__); 2486 2487 if (nbufs > 1) { 2488 dev_err(this->dev, "Multiple data instructions not supported\n"); 2489 ret = -EINVAL; 2490 goto unmap; 2491 } 2492 2493 if (this->bch) { 2494 writel(this->bch_flashlayout0, 2495 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0); 2496 writel(this->bch_flashlayout1, 2497 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1); 2498 } 2499 2500 if (this->bch && buf_read) { 2501 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, 2502 
this->resources.bch_regs + HW_BCH_CTRL_SET); 2503 completion = &this->bch_done; 2504 } else { 2505 desc->callback = dma_irq_callback; 2506 desc->callback_param = this; 2507 completion = &this->dma_done; 2508 } 2509 2510 init_completion(completion); 2511 2512 dmaengine_submit(desc); 2513 dma_async_issue_pending(get_dma_chan(this)); 2514 2515 to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000)); 2516 if (!to) { 2517 dev_err(this->dev, "DMA timeout, last DMA\n"); 2518 gpmi_dump_info(this); 2519 ret = -ETIMEDOUT; 2520 goto unmap; 2521 } 2522 2523 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, 2524 this->resources.bch_regs + HW_BCH_CTRL_CLR); 2525 gpmi_clear_bch(this); 2526 2527 ret = 0; 2528 2529 unmap: 2530 for (i = 0; i < this->ntransfers; i++) { 2531 struct gpmi_transfer *transfer = &this->transfers[i]; 2532 2533 if (transfer->direction != DMA_NONE) 2534 dma_unmap_sg(this->dev, &transfer->sgl, 1, 2535 transfer->direction); 2536 } 2537 2538 if (!ret && buf_read && !direct) 2539 memcpy(buf_read, this->data_buffer_dma, 2540 gpmi_raw_len_to_len(this, buf_len)); 2541 2542 this->bch = false; 2543 2544 pm_runtime_mark_last_busy(this->dev); 2545 pm_runtime_put_autosuspend(this->dev); 2546 2547 return ret; 2548 } 2549 2550 static const struct nand_controller_ops gpmi_nand_controller_ops = { 2551 .attach_chip = gpmi_nand_attach_chip, 2552 .setup_data_interface = gpmi_setup_data_interface, 2553 .exec_op = gpmi_nfc_exec_op, 2554 }; 2555 2556 static int gpmi_nand_init(struct gpmi_nand_data *this) 2557 { 2558 struct nand_chip *chip = &this->nand; 2559 struct mtd_info *mtd = nand_to_mtd(chip); 2560 int ret; 2561 2562 /* init the MTD data structures */ 2563 mtd->name = "gpmi-nand"; 2564 mtd->dev.parent = this->dev; 2565 2566 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. 
*/ 2567 nand_set_controller_data(chip, this); 2568 nand_set_flash_node(chip, this->pdev->dev.of_node); 2569 chip->legacy.block_markbad = gpmi_block_markbad; 2570 chip->badblock_pattern = &gpmi_bbt_descr; 2571 chip->options |= NAND_NO_SUBPAGE_WRITE; 2572 2573 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ 2574 this->swap_block_mark = !GPMI_IS_MX23(this); 2575 2576 /* 2577 * Allocate a temporary DMA buffer for reading ID in the 2578 * nand_scan_ident(). 2579 */ 2580 this->bch_geometry.payload_size = 1024; 2581 this->bch_geometry.auxiliary_size = 128; 2582 ret = gpmi_alloc_dma_buffer(this); 2583 if (ret) 2584 goto err_out; 2585 2586 nand_controller_init(&this->base); 2587 this->base.ops = &gpmi_nand_controller_ops; 2588 chip->controller = &this->base; 2589 2590 ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1); 2591 if (ret) 2592 goto err_out; 2593 2594 ret = nand_boot_init(this); 2595 if (ret) 2596 goto err_nand_cleanup; 2597 ret = nand_create_bbt(chip); 2598 if (ret) 2599 goto err_nand_cleanup; 2600 2601 ret = mtd_device_register(mtd, NULL, 0); 2602 if (ret) 2603 goto err_nand_cleanup; 2604 return 0; 2605 2606 err_nand_cleanup: 2607 nand_cleanup(chip); 2608 err_out: 2609 gpmi_free_dma_buffer(this); 2610 return ret; 2611 } 2612 2613 static const struct of_device_id gpmi_nand_id_table[] = { 2614 { 2615 .compatible = "fsl,imx23-gpmi-nand", 2616 .data = &gpmi_devdata_imx23, 2617 }, { 2618 .compatible = "fsl,imx28-gpmi-nand", 2619 .data = &gpmi_devdata_imx28, 2620 }, { 2621 .compatible = "fsl,imx6q-gpmi-nand", 2622 .data = &gpmi_devdata_imx6q, 2623 }, { 2624 .compatible = "fsl,imx6sx-gpmi-nand", 2625 .data = &gpmi_devdata_imx6sx, 2626 }, { 2627 .compatible = "fsl,imx7d-gpmi-nand", 2628 .data = &gpmi_devdata_imx7d, 2629 }, {} 2630 }; 2631 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); 2632 2633 static int gpmi_nand_probe(struct platform_device *pdev) 2634 { 2635 struct gpmi_nand_data *this; 2636 const struct of_device_id *of_id; 2637 int ret; 2638 
2639 this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL); 2640 if (!this) 2641 return -ENOMEM; 2642 2643 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); 2644 if (of_id) { 2645 this->devdata = of_id->data; 2646 } else { 2647 dev_err(&pdev->dev, "Failed to find the right device id.\n"); 2648 return -ENODEV; 2649 } 2650 2651 platform_set_drvdata(pdev, this); 2652 this->pdev = pdev; 2653 this->dev = &pdev->dev; 2654 2655 ret = acquire_resources(this); 2656 if (ret) 2657 goto exit_acquire_resources; 2658 2659 ret = __gpmi_enable_clk(this, true); 2660 if (ret) 2661 goto exit_nfc_init; 2662 2663 pm_runtime_set_autosuspend_delay(&pdev->dev, 500); 2664 pm_runtime_use_autosuspend(&pdev->dev); 2665 pm_runtime_set_active(&pdev->dev); 2666 pm_runtime_enable(&pdev->dev); 2667 pm_runtime_get_sync(&pdev->dev); 2668 2669 ret = gpmi_init(this); 2670 if (ret) 2671 goto exit_nfc_init; 2672 2673 ret = gpmi_nand_init(this); 2674 if (ret) 2675 goto exit_nfc_init; 2676 2677 pm_runtime_mark_last_busy(&pdev->dev); 2678 pm_runtime_put_autosuspend(&pdev->dev); 2679 2680 dev_info(this->dev, "driver registered.\n"); 2681 2682 return 0; 2683 2684 exit_nfc_init: 2685 pm_runtime_put(&pdev->dev); 2686 pm_runtime_disable(&pdev->dev); 2687 release_resources(this); 2688 exit_acquire_resources: 2689 2690 return ret; 2691 } 2692 2693 static int gpmi_nand_remove(struct platform_device *pdev) 2694 { 2695 struct gpmi_nand_data *this = platform_get_drvdata(pdev); 2696 2697 pm_runtime_put_sync(&pdev->dev); 2698 pm_runtime_disable(&pdev->dev); 2699 2700 nand_release(&this->nand); 2701 gpmi_free_dma_buffer(this); 2702 release_resources(this); 2703 return 0; 2704 } 2705 2706 #ifdef CONFIG_PM_SLEEP 2707 static int gpmi_pm_suspend(struct device *dev) 2708 { 2709 struct gpmi_nand_data *this = dev_get_drvdata(dev); 2710 2711 release_dma_channels(this); 2712 return 0; 2713 } 2714 2715 static int gpmi_pm_resume(struct device *dev) 2716 { 2717 struct gpmi_nand_data *this = dev_get_drvdata(dev); 2718 
int ret; 2719 2720 ret = acquire_dma_channels(this); 2721 if (ret < 0) 2722 return ret; 2723 2724 /* re-init the GPMI registers */ 2725 ret = gpmi_init(this); 2726 if (ret) { 2727 dev_err(this->dev, "Error setting GPMI : %d\n", ret); 2728 return ret; 2729 } 2730 2731 /* Set flag to get timing setup restored for next exec_op */ 2732 if (this->hw.clk_rate) 2733 this->hw.must_apply_timings = true; 2734 2735 /* re-init the BCH registers */ 2736 ret = bch_set_geometry(this); 2737 if (ret) { 2738 dev_err(this->dev, "Error setting BCH : %d\n", ret); 2739 return ret; 2740 } 2741 2742 return 0; 2743 } 2744 #endif /* CONFIG_PM_SLEEP */ 2745 2746 static int __maybe_unused gpmi_runtime_suspend(struct device *dev) 2747 { 2748 struct gpmi_nand_data *this = dev_get_drvdata(dev); 2749 2750 return __gpmi_enable_clk(this, false); 2751 } 2752 2753 static int __maybe_unused gpmi_runtime_resume(struct device *dev) 2754 { 2755 struct gpmi_nand_data *this = dev_get_drvdata(dev); 2756 2757 return __gpmi_enable_clk(this, true); 2758 } 2759 2760 static const struct dev_pm_ops gpmi_pm_ops = { 2761 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume) 2762 SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL) 2763 }; 2764 2765 static struct platform_driver gpmi_nand_driver = { 2766 .driver = { 2767 .name = "gpmi-nand", 2768 .pm = &gpmi_pm_ops, 2769 .of_match_table = gpmi_nand_id_table, 2770 }, 2771 .probe = gpmi_nand_probe, 2772 .remove = gpmi_nand_remove, 2773 }; 2774 module_platform_driver(gpmi_nand_driver); 2775 2776 MODULE_AUTHOR("Freescale Semiconductor, Inc."); 2777 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver"); 2778 MODULE_LICENSE("GPL"); 2779