// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "gpmi-nand.h"
#include "bch-regs.h"

/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"

/* add our own bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};

/*
 * We may change the layout if we can get the ECC info from the datasheet,
 * else we will use all the (page + OOB).
 */
static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *geo = &this->bch_geometry;

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = geo->page_size - mtd->writesize;

	return 0;
}

static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *geo = &this->bch_geometry;

	if (section)
		return -ERANGE;

	/* The available oob space we have. */
	if (geo->page_size < mtd->writesize + mtd->oobsize) {
		oobregion->offset = geo->page_size - mtd->writesize;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}
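/*
 * Illustrative worked example (not part of the original driver), assuming a
 * hypothetical chip with a 2KiB page, 64-byte OOB, 10 bytes of metadata, GF13
 * and four 512-byte chunks protected with 8-bit ECC: geo->page_size would be
 * 2048 + 10 + (13 * 8 * 4) / 8 = 2110 bytes, so gpmi_ooblayout_ecc() would
 * report an ECC region of 2110 - 2048 = 62 bytes at offset 0, and
 * gpmi_ooblayout_free() would report the remaining 64 - 62 = 2 free OOB bytes
 * at offset 62.
 */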
static const char * const gpmi_clks_for_mx2x[] = {
	"gpmi_io",
};

static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
	.ecc = gpmi_ooblayout_ecc,
	.free = gpmi_ooblayout_free,
};

static const struct gpmi_devdata gpmi_devdata_imx23 = {
	.type = IS_MX23,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
	.type = IS_MX28,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const char * const gpmi_clks_for_mx6[] = {
	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
	.type = IS_MX6Q,
	.bch_max_ecc_strength = 40,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
	.type = IS_MX6SX,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const char * const gpmi_clks_for_mx7d[] = {
	"gpmi_io", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx7d = {
	.type = IS_MX7D,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx7d,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};

static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}

/*
 * Calculate the ECC strength by hand:
 *	E : The ECC strength.
 *	G : the length of Galois Field.
 *	N : The chunk count per page.
 *	O : the oobsize of the NAND chip.
 *	M : the metadata size per page.
 *
 * The formula is :
 *	    E * G * N
 *	   ------------ <= (O - M)
 *	        8
 *
 * So, we get E by:
 *	       (O - M) * 8
 *	  E <= -------------
 *	          G * N
 */
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	int ecc_strength;

	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	/* Round down to the nearest even number. */
	return round_down(ecc_strength, 2);
}

static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;

	/* Do the sanity check. */
	if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
		/* The mx23/mx28 only support GF13. */
		if (geo->gf_len == 14)
			return false;
	}
	return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
}
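/*
 * Illustrative worked example (not part of the original driver): with the
 * hypothetical geometry used above (64-byte OOB, 10-byte metadata, GF13,
 * four chunks), get_ecc_strength() yields
 *
 *	E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 ~= 8.3,
 *
 * which rounds down to the even value 8. gpmi_check_ecc() then accepts it,
 * since 8 is below every bch_max_ecc_strength listed above (20, 40 or 62).
 */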
/*
 * If we can get the ECC information from the nand chip, we do not
 * need to calculate them ourselves.
 *
 * We may have available oob space in this case.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
				    unsigned int ecc_strength,
				    unsigned int ecc_step)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		dev_err(this->dev,
			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
			chip->ecc_strength_ds, chip->ecc_step_ds);
		return -EINVAL;
	}
	geo->ecc_chunk_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);
	if (!gpmi_check_ecc(this))
		return -EINVAL;

	/* Keep the C >= O */
	if (geo->ecc_chunk_size < mtd->oobsize) {
		dev_err(this->dev,
			"unsupported nand chip. ecc size: %d, oob size : %d\n",
			ecc_step, mtd->oobsize);
		return -EINVAL;
	}

	/* The default value, see comment in legacy_set_geometry(). */
	geo->metadata_size = 10;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * A NAND chip with a 2K page (512-byte data chunks) looks like this:
	 *
	 *    |                           P                             |
	 *    |<------------------------------------------------------->|
	 *    |                                                         |
	 *    |                                          (Block Mark)   |
	 *    |                      P'                        |      | |     |
	 *    |<---------------------------------------------->|  D   | |  O' |
	 *    |                                                |<---->| |<--->|
	 *    V                                                V      V V     V
	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
	 *                                                   ^              ^
	 *                                                   |      O       |
	 *                                                   |<------------>|
	 *
	 *	P : the page size for the BCH module.
	 *	E : The ECC strength.
	 *	G : the length of Galois Field.
	 *	N : The chunk count per page.
	 *	M : the metadata size per page.
	 *	C : the ecc chunk size, aka the "data" above.
	 *	P': the nand chip's page size.
	 *	O : the nand chip's oob size.
	 *	O': the free oob.
	 *
	 * The formula for P is :
	 *
	 *	        E * G * N
	 *	   P = ----------- + P' + M
	 *	            8
	 *
	 * The position of the block mark moves forward in the ECC-based view
	 * of the page, and the delta is:
	 *
	 *	        E * G * (N - 1)
	 *	   D = (--------------- + M)
	 *	               8
	 *
	 * Please see the comment in legacy_set_geometry().
	 * With the condition C >= O, we can still get the same result.
	 * So the bit position of the physical block mark within the ECC-based
	 * view of the page is :
	 *	(P' - D) * 8
	 */
	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

	geo->payload_size = mtd->writesize;

	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
			+ ALIGN(geo->ecc_chunk_count, 4);

	if (!this->swap_block_mark)
		return 0;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
	return 0;
}

static int legacy_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize) {
		geo->ecc_chunk_size *= 2; /* keep C >= O */
		geo->gf_len = 14;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev,
			"ecc strength: %d cannot be supported by the controller (%d)\n"
			"try to use minimum ecc strength that NAND chip required\n",
			geo->ecc_strength,
			this->devdata->bch_max_ecc_strength);
		return -EINVAL;
	}

	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size   = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * A NAND chip with a 2K page looks like this:
	 *                                             (Block Mark)
	 *                                                   |      |
	 *                                                   |  D   |
	 *                                                   |<---->|
	 *                                                   V      V
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of the block mark moves forward in the ECC-based view
	 * of the page, and the delta is:
	 *
	 *	        E * G * (N - 1)
	 *	   D = (--------------- + M)
	 *	               8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 *	C >= O		(C is the ecc chunk size)
	 *
	 * It's easy to deduce the following result:
	 *
	 *	   E * G      (O - M)     C - M       C - M
	 *	   ----- <=  -------- <= ------- < ---------
	 *	     8           N          N        (N - 1)
	 *
	 * So, we get:
	 *
	 *	        E * G * (N - 1)
	 *	   D = (--------------- + M) < C
	 *	               8
	 *
	 * The above inequality means the position of the block mark
	 * within the ECC-based view of the page is still in the data chunk,
	 * and it's NOT in the ECC bits of the chunk.
	 *
	 * Use the following to compute the bit position of the
	 * physical block mark within the ECC-based view of the page:
	 *	(page_size - D) * 8
	 *
	 * --Huang Shijie
	 */
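	/*
	 * Illustrative worked example (not part of the original driver): for
	 * the hypothetical geometry used in the comments above (2KiB page,
	 * 10-byte metadata, GF13, four 512-byte chunks, 8-bit ECC), the delta
	 * is
	 *
	 *	D = (8 * 13 * 3) / 8 + 10 = 39 + 10 = 49 bytes,
	 *
	 * which is indeed smaller than C = 512. The block mark bit position
	 * is (2048 - 49) * 8 = 15992, i.e. byte offset 1999, bit offset 0.
	 */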
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
	return 0;
}

int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;

	if (chip->ecc.strength > 0 && chip->ecc.size > 0)
		return set_geometry_by_ecc_info(this, chip->ecc.strength,
						chip->ecc.size);

	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
				|| legacy_set_geometry(this)) {
		if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
			return -EINVAL;

		return set_geometry_by_ecc_info(this, chip->ecc_strength_ds,
						chip->ecc_step_ds);
	}

	return 0;
}

struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
	/* We use DMA channel 0 to access all the nand chips. */
	return this->dma_chans[0];
}

/* Can we use the upper layer's buffer directly for DMA? */
bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len,
		      enum dma_data_direction dr)
{
	struct scatterlist *sgl = &this->data_sgl;
	int ret;

	/* first try to map the upper buffer directly */
	if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
		sg_init_one(sgl, buf, len);
		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			goto map_fail;

		return true;
	}

map_fail:
	/* We have to use our own DMA buffer. */
	sg_init_one(sgl, this->data_buffer_dma, len);

	if (dr == DMA_TO_DEVICE)
		memcpy(this->data_buffer_dma, buf, len);

	dma_map_sg(this->dev, sgl, 1, dr);

	return false;
}

/* This will be called after the DMA operation is finished. */
static void dma_irq_callback(void *param)
{
	struct gpmi_nand_data *this = param;
	struct completion *dma_c = &this->dma_done;

	complete(dma_c);
}

int start_dma_without_bch_irq(struct gpmi_nand_data *this,
			      struct dma_async_tx_descriptor *desc)
{
	struct completion *dma_c = &this->dma_done;
	unsigned long timeout;

	init_completion(dma_c);

	desc->callback		= dma_irq_callback;
	desc->callback_param	= this;
	dmaengine_submit(desc);
	dma_async_issue_pending(get_dma_chan(this));

	/* Wait for the interrupt from the DMA block. */
	timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
	if (!timeout) {
		dev_err(this->dev, "DMA timeout, last DMA\n");
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * This function is used when reading or writing pages with BCH.
 * It will wait for the BCH interrupt for at most ONE second.
 * Actually, we must wait for two interrupts:
 *	[1] first the DMA interrupt, and
 *	[2] then the BCH interrupt.
 */
int start_dma_with_bch_irq(struct gpmi_nand_data *this,
			   struct dma_async_tx_descriptor *desc)
{
	struct completion *bch_c = &this->bch_done;
	unsigned long timeout;

	/* Prepare to receive an interrupt from the BCH block. */
	init_completion(bch_c);

	/* start the DMA */
	start_dma_without_bch_irq(this, desc);

	/* Wait for the interrupt from the BCH block. */
	timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
	if (!timeout) {
		dev_err(this->dev, "BCH timeout\n");
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}

static int acquire_register_block(struct gpmi_nand_data *this,
				  const char *res_name)
{
	struct platform_device *pdev = this->pdev;
	struct resources *res = &this->resources;
	struct resource *r;
	void __iomem *p;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
	p = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(p))
		return PTR_ERR(p);

	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
		res->gpmi_regs = p;
	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
		res->bch_regs = p;
	else
		dev_err(this->dev, "unknown resource name : %s\n", res_name);

	return 0;
}

static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	struct resource *r;
	int err;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
	if (!r) {
		dev_err(this->dev, "Can't get resource for %s\n", res_name);
		return -ENODEV;
	}

	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
	if (err)
		dev_err(this->dev, "error requesting BCH IRQ\n");

	return err;
}

static void release_dma_channels(struct gpmi_nand_data *this)
{
	unsigned int i;

	for (i = 0; i < DMA_CHANS; i++)
		if (this->dma_chans[i]) {
			dma_release_channel(this->dma_chans[i]);
			this->dma_chans[i] = NULL;
		}
}

static int acquire_dma_channels(struct gpmi_nand_data *this)
{
	struct platform_device *pdev = this->pdev;
	struct dma_chan *dma_chan;

	/* request dma channel */
	dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!dma_chan) {
		dev_err(this->dev, "Failed to request DMA channel.\n");
		goto acquire_err;
	}

	this->dma_chans[0] = dma_chan;
	return 0;

acquire_err:
	release_dma_channels(this);
	return -EINVAL;
}

static int gpmi_get_clks(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct clk *clk;
	int err, i;

	for (i = 0; i < this->devdata->clks_count; i++) {
		clk = devm_clk_get(this->dev, this->devdata->clks[i]);
		if (IS_ERR(clk)) {
			err = PTR_ERR(clk);
			goto err_clock;
		}

		r->clock[i] = clk;
	}

	if (GPMI_IS_MX6(this))
		/*
		 * Set the default value for the gpmi clock.
		 *
		 * If you want to use an ONFI NAND in synchronous mode, you
		 * should change the clock as needed.
		 */
		clk_set_rate(r->clock[0], 22000000);

	return 0;

err_clock:
	dev_dbg(this->dev, "failed in finding the clocks.\n");
	return err;
}

static int acquire_resources(struct gpmi_nand_data *this)
{
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_regs;

	ret = gpmi_get_clks(this);
	if (ret)
		goto exit_clock;
	return 0;

exit_clock:
	release_dma_channels(this);
exit_regs:
	return ret;
}

static void release_resources(struct gpmi_nand_data *this)
{
	release_dma_channels(this);
}

static int send_page_prepare(struct gpmi_nand_data *this,
			     const void *source, unsigned length,
			     void *alt_virt, dma_addr_t alt_phys,
			     unsigned alt_size,
			     const void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(source)) {
		dma_addr_t source_phys;

		source_phys = dma_map_single(dev, (void *)source, length,
					     DMA_TO_DEVICE);
		if (dma_mapping_error(dev, source_phys)) {
			if (alt_size < length) {
				dev_err(dev, "Alternate buffer is too small\n");
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = source;
		*use_phys = source_phys;
		return 0;
	}
map_failed:
	/*
	 * Copy the content of the source buffer into the alternate
	 * buffer and set up the return values accordingly.
	 */
	memcpy(alt_virt, source, length);

	*use_virt = alt_virt;
	*use_phys = alt_phys;
	return 0;
}

static void send_page_end(struct gpmi_nand_data *this,
			  const void *source, unsigned length,
			  void *alt_virt, dma_addr_t alt_phys,
			  unsigned alt_size,
			  const void *used_virt, dma_addr_t used_phys)
{
	struct device *dev = this->dev;

	if (used_virt == source)
		dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
}

static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;

	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
		dma_free_coherent(dev, this->page_buffer_size,
				  this->page_buffer_virt,
				  this->page_buffer_phys);
	kfree(this->cmd_buffer);
	kfree(this->data_buffer_dma);
	kfree(this->raw_buffer);

	this->cmd_buffer	= NULL;
	this->data_buffer_dma	= NULL;
	this->raw_buffer	= NULL;
	this->page_buffer_virt	= NULL;
	this->page_buffer_size	= 0;
}

/* Allocate the DMA buffers */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);

	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (this->cmd_buffer == NULL)
		goto error_alloc;

	/*
	 * [2] Allocate a read/write data buffer.
	 *
	 * gpmi_alloc_dma_buffer() can be called twice: we allocate a
	 * PAGE_SIZE buffer if it is called before NAND identification, and a
	 * buffer of the real NAND page size when it is called afterwards.
	 */
	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
					GFP_DMA | GFP_KERNEL);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	/*
	 * [3] Allocate the page buffer.
	 *
	 * Both the payload buffer and the auxiliary buffer must appear on
	 * 32-bit boundaries. We presume the size of the payload buffer is a
	 * power of two and is much larger than four, which guarantees the
	 * auxiliary buffer will appear on a 32-bit boundary.
	 */
	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
						    &this->page_buffer_phys,
						    GFP_DMA);
	if (!this->page_buffer_virt)
		goto error_alloc;

	this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!this->raw_buffer)
		goto error_alloc;

	/* Slice up the page buffer. */
	this->payload_virt = this->page_buffer_virt;
	this->payload_phys = this->page_buffer_phys;
	this->auxiliary_virt = this->payload_virt + geo->payload_size;
	this->auxiliary_phys = this->payload_phys + geo->payload_size;
	return 0;

error_alloc:
	gpmi_free_dma_buffer(this);
	return -ENOMEM;
}

static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret;

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
	 */
	if ((ctrl & (NAND_ALE | NAND_CLE))) {
		if (data != NAND_CMD_NONE)
			this->cmd_buffer[this->command_length++] = data;
		return;
	}

	if (!this->command_length)
		return;

	ret = gpmi_send_command(this);
	if (ret)
		dev_err(this->dev, "Chip: %u, Error %d\n",
			this->current_chip, ret);

	this->command_length = 0;
}

static int gpmi_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	return gpmi_is_ready(this, this->current_chip);
}

static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret;

	/*
	 * For power consumption matters, disable/enable the clock each time a
	 * die is selected/unselected.
	 */
	if (this->current_chip < 0 && chipnr >= 0) {
		ret = gpmi_enable_clk(this);
		if (ret)
			dev_err(this->dev, "Failed to enable the clock\n");
	} else if (this->current_chip >= 0 && chipnr < 0) {
		ret = gpmi_disable_clk(this);
		if (ret)
			dev_err(this->dev, "Failed to disable the clock\n");
	}

	/*
	 * This driver currently supports only one NAND chip. Plus, dies share
	 * the same configuration. So once timings have been applied on the
	 * controller side, they will not change anymore. When the time comes,
	 * the check on must_apply_timings will have to be dropped.
	 */
	if (chipnr >= 0 && this->hw.must_apply_timings) {
		this->hw.must_apply_timings = false;
		gpmi_nfc_apply_timings(this);
	}

	this->current_chip = chipnr;
}

static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	dev_dbg(this->dev, "len is %d\n", len);

	gpmi_read_data(this, buf, len);
}

static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	dev_dbg(this->dev, "len is %d\n", len);

	gpmi_send_data(this, buf, len);
}

static uint8_t gpmi_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	uint8_t *buf = this->data_buffer_dma;

	gpmi_read_buf(mtd, buf, 1);
	return buf[0];
}

/*
 * Handles block mark swapping.
 * It can be called when swapping the block mark, or when swapping it back,
 * because the operations are the same.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
				void *payload, void *auxiliary)
{
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	unsigned char *p;
	unsigned char *a;
	unsigned int  bit;
	unsigned char mask;
	unsigned char from_data;
	unsigned char from_oob;

	if (!this->swap_block_mark)
		return;

	/*
	 * If control arrives here, we're swapping. Make some convenience
	 * variables.
	 */
	bit = nfc_geo->block_mark_bit_offset;
	p   = payload + nfc_geo->block_mark_byte_offset;
	a   = auxiliary;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));

	/* Get the byte from the OOB. */
	from_oob = a[0];

	/* Swap them. */
	a[0] = from_data;

	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}

static int gpmi_ecc_read_page_data(struct nand_chip *chip,
				   uint8_t *buf, int oob_required,
				   int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(chip);
	dma_addr_t payload_phys;
	unsigned int  i;
	unsigned char *status;
	unsigned int  max_bitflips = 0;
	int           ret;
	bool          direct = false;

	dev_dbg(this->dev, "page number is : %d\n", page);

	payload_phys = this->payload_phys;

	if (virt_addr_valid(buf)) {
		dma_addr_t dest_phys;

		dest_phys = dma_map_single(this->dev, buf,
					   nfc_geo->payload_size,
					   DMA_FROM_DEVICE);
		if (!dma_mapping_error(this->dev, dest_phys)) {
			payload_phys = dest_phys;
			direct = true;
		}
	}

	/* go! */
	ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys);

	if (direct)
		dma_unmap_single(this->dev, payload_phys,
				 nfc_geo->payload_size, DMA_FROM_DEVICE);

	if (ret) {
		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
		return ret;
	}

	/* Loop over status bytes, accumulating ECC status. */
	status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset;

	if (!direct)
		memcpy(buf, this->payload_virt, nfc_geo->payload_size);

	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
			u8 *eccbuf = this->raw_buffer;
			int offset, bitoffset;
			int eccbytes;
			int flips;

			/* Read ECC bytes into our internal raw_buffer */
			offset = nfc_geo->metadata_size * 8;
			offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
			offset -= eccbits;
			bitoffset = offset % 8;
			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
			offset /= 8;
			eccbytes -= offset;
			nand_change_read_column_op(chip, offset, eccbuf,
						   eccbytes, false);

			/*
			 * ECC data are not byte aligned and we may have
			 * in-band data in the first and last byte of
			 * eccbuf. Set non-eccbits to one so that
			 * nand_check_erased_ecc_chunk() does not count them
			 * as bitflips.
			 */
			if (bitoffset)
				eccbuf[0] |= GENMASK(bitoffset - 1, 0);

			bitoffset = (bitoffset + eccbits) % 8;
			if (bitoffset)
				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

			/*
			 * The ECC hardware has an uncorrectable ECC status
			 * code in case we have bitflips in an erased page. As
			 * nothing was written into this subpage the ECC is
			 * obviously wrong and we can not trust it. We assume
			 * at this point that we are reading an erased page and
			 * try to correct the bitflips in buffer up to
			 * ecc_strength bitflips. If this is a page with random
			 * data, we exceed this number of bitflips and have an
			 * ECC failure. Otherwise we use the corrected buffer.
			 */
			if (i == 0) {
				/* The first block includes metadata */
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->ecc_chunk_size,
						nfc_geo->ecc_chunk_size,
						eccbuf, eccbytes,
						this->auxiliary_virt,
						nfc_geo->metadata_size,
						nfc_geo->ecc_strength);
			} else {
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->ecc_chunk_size,
						nfc_geo->ecc_chunk_size,
						eccbuf, eccbytes,
						NULL, 0,
						nfc_geo->ecc_strength);
			}

			if (flips > 0) {
				max_bitflips = max_t(unsigned int, max_bitflips,
						     flips);
				mtd->ecc_stats.corrected += flips;
				continue;
			}

			mtd->ecc_stats.failed++;
			continue;
		}

		mtd->ecc_stats.corrected += *status;
		max_bitflips = max_t(unsigned int, max_bitflips, *status);
	}

	/* handle the block mark swapping */
	block_mark_swapping(this, buf, this->auxiliary_virt);

	if (oob_required) {
		/*
		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
		 * for details about our policy for delivering the OOB.
		 *
		 * We fill the caller's buffer with set bits, and then copy the
		 * block mark to the caller's buffer. Note that, if block mark
		 * swapping was necessary, it has already been done, so we can
		 * rely on the first byte of the auxiliary buffer to contain
		 * the block mark.
		 */
		memset(chip->oob_poi, ~0, mtd->oobsize);
		chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
	}

	return max_bitflips;
}

static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			      uint8_t *buf, int oob_required, int page)
{
	nand_read_page_op(chip, page, 0, NULL, 0);

	return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
}

/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
				 uint32_t offs, uint32_t len, uint8_t *buf,
				 int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	void __iomem *bch_regs = this->resources.bch_regs;
	struct bch_geometry old_geo = this->bch_geometry;
	struct bch_geometry *geo = &this->bch_geometry;
	int size = chip->ecc.size; /* ECC chunk size */
	int meta, n, page_size;
	u32 r1_old, r2_old, r1_new, r2_new;
	unsigned int max_bitflips;
	int first, last, marker_pos;
	int ecc_parity_size;
	int col = 0;
	int old_swap_block_mark = this->swap_block_mark;

	/* The size of ECC parity */
	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;

	/* Align it with the chunk size */
	first = offs / size;
	last = (offs + len - 1) / size;

	if (this->swap_block_mark) {
		/*
		 * Find the chunk which contains the Block Marker.
		 * If this chunk is in the range of [first, last],
		 * we have to read out the whole page.
		 * Why? Because we swapped the data at the position of the
		 * Block Marker into the metadata, which is bound to chunk 0.
		 */
		marker_pos = geo->block_mark_byte_offset / size;
		if (last >= marker_pos && first <= marker_pos) {
			dev_dbg(this->dev,
				"page:%d, first:%d, last:%d, marker at:%d\n",
				page, first, last, marker_pos);
			return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
		}
	}

	meta = geo->metadata_size;
	if (first) {
		col = meta + (size + ecc_parity_size) * first;
		meta = 0;
		buf = buf + first * size;
	}

	nand_read_page_op(chip, page, col, NULL, 0);

	/* Save the old environment */
	r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
	r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);

	/* change the BCH registers and bch_geometry{} */
	n = last - first + 1;
	page_size = meta + (size + ecc_parity_size) * n;

	r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
			BM_BCH_FLASH0LAYOUT0_META_SIZE);
	r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
			| BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
	writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);

	r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
	r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
	writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);

	geo->ecc_chunk_count = n;
	geo->payload_size = n * size;
	geo->page_size = page_size;
	geo->auxiliary_status_offset = ALIGN(meta, 4);

	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
		page, offs, len, col, first, n, page_size);

	/* Read the subpage now */
	this->swap_block_mark = false;
	max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);

	/* Restore */
	writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
	writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
	this->bch_geometry = old_geo;
	this->swap_block_mark = old_swap_block_mark;

	return max_bitflips;
}

static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			       const uint8_t *buf, int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	const void *payload_virt;
	dma_addr_t payload_phys;
	const void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	int ret;

	dev_dbg(this->dev, "ecc write page.\n");

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	if (this->swap_block_mark) {
		/*
		 * If control arrives here, we're doing block mark swapping.
		 * Since we can't modify the caller's buffers, we must copy
		 * them into our own.
		 */
		memcpy(this->payload_virt, buf, mtd->writesize);
		payload_virt = this->payload_virt;
		payload_phys = this->payload_phys;

		memcpy(this->auxiliary_virt, chip->oob_poi,
				nfc_geo->auxiliary_size);
		auxiliary_virt = this->auxiliary_virt;
		auxiliary_phys = this->auxiliary_phys;

		/* Handle block mark swapping. */
		block_mark_swapping(this,
				(void *)payload_virt, (void *)auxiliary_virt);
	} else {
		/*
		 * If control arrives here, we're not doing block mark
		 * swapping, so we can try to use the caller's buffers.
		 */
		ret = send_page_prepare(this,
				buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				&payload_virt, &payload_phys);
		if (ret) {
			dev_err(this->dev, "Inadequate payload DMA buffer\n");
			return 0;
		}

		ret = send_page_prepare(this,
				chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				&auxiliary_virt, &auxiliary_phys);
		if (ret) {
			dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
			goto exit_auxiliary;
		}
	}

	/* Ask the NFC. */
	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
	if (ret)
		dev_err(this->dev, "Error in ECC-based write: %d\n", ret);

	if (!this->swap_block_mark) {
		send_page_end(this, chip->oob_poi, mtd->oobsize,
			      this->auxiliary_virt, this->auxiliary_phys,
			      nfc_geo->auxiliary_size,
			      auxiliary_virt, auxiliary_phys);
exit_auxiliary:
		send_page_end(this, buf, mtd->writesize,
			      this->payload_virt, this->payload_phys,
			      nfc_geo->payload_size,
			      payload_virt, payload_phys);
	}

	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and
 *    which are OOB. This gives the caller a way to see the actual, physical
 *    bytes in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on two questions:
 *
 * 1) Are we doing a "raw" read, or an ECC-based read?
 *
 * 2) Are we using block mark swapping or transcription?
 *
 * There are four cases, illustrated by the following Karnaugh map:
 *
 *                    |           Raw           |         ECC-based       |
 *       -------------+-------------------------+-------------------------+
 *                    | Read the conventional   |                         |
 *                    | OOB at the end of the   |                         |
 *       Swapping     | page and return it. It  |                         |
 *                    | contains exactly what   |                         |
 *                    | we want.                | Read the block mark and |
 *       -------------+-------------------------+ return it in a buffer   |
 *                    | Read the conventional   | full of set bits.       |
 *                    | OOB at the end of the   |                         |
 *                    | page and also the block |                         |
 *       Transcribing | mark in the metadata.   |                         |
 *                    | Copy the block mark     |                         |
 *                    | into the first byte of  |                         |
 *                    | the OOB.                |                         |
 *       -------------+-------------------------+-------------------------+
 *
 * Note that we break rule #4 in the Transcribing/Raw case because we're not
 * giving an accurate view of the actual, physical bytes in the page (we're
 * overwriting the block mark). That's OK because it's more important to
 * follow rule #2.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is
 * not easy. When reading a page, for example, the NAND Flash MTD code calls
 * our ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD
 * wants an ECC-based or raw view of the page is implicit in which function
 * it calls (there is a similar pair of ECC-based/raw functions for writing).
 */
static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	dev_dbg(this->dev, "page number is %d\n", page);
	/* clear the OOB buffer */
	memset(chip->oob_poi, ~0, mtd->oobsize);

	/* Read out the conventional OOB. */
	nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/*
	 * Now, we want to make sure the block mark is correct. In the
	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
	 * Otherwise, we need to explicitly read it.
	 */
	if (GPMI_IS_MX23(this)) {
		/* Read the block mark into the first byte of the OOB buffer. */
		nand_read_page_op(chip, page, 0, NULL, 0);
		chip->oob_poi[0] = chip->read_byte(mtd);
	}

	return 0;
}

static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	struct mtd_oob_region of = { };

	/* Do we have available oob area? */
	mtd_ooblayout_free(mtd, 0, &of);
	if (!of.length)
		return -EPERM;

	if (!nand_is_slc(chip))
		return -EPERM;

	return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
				 chip->oob_poi + of.offset, of.length);
}
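/*
 * Illustrative raw-page layout (not part of the original driver), using the
 * hypothetical geometry from the comments above (2KiB page, 10-byte metadata,
 * GF13, four 512-byte chunks, 8-bit ECC): the metadata occupies bits 0-79,
 * and each chunk contributes 512 * 8 = 4096 data bits immediately followed by
 * 8 * 13 = 104 ECC bits, so chunk 0's data spans bits [80, 4176) and its ECC
 * bits span [4176, 4280), and so on. The total is 80 + 4 * (4096 + 104) =
 * 16880 bits = 2110 bytes, matching the geo->page_size computed in
 * set_geometry_by_ecc_info()/legacy_set_geometry().
 */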
/*
 * This function reads a NAND page without involving the ECC engine (no HW
 * ECC correction).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and does not align data chunks on
 * byte boundaries.
 * We thus need to take care moving the payload data and ECC bits stored in
 * the page into the provided buffers, which is why we're using
 * gpmi_copy_bits.
 *
 * See the set_geometry_by_ecc_info inline comments for a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
				  struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	size_t src_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	uint8_t *oob = chip->oob_poi;
	int step;

	nand_read_page_op(chip, page, 0, tmp_buf,
			  mtd->writesize + mtd->oobsize);

	/*
	 * If required, swap the bad block marker and the data stored in the
	 * metadata section, so that we don't wrongly consider a block as bad.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	/*
	 * Copy the metadata section into the oob buffer (this section is
	 * guaranteed to be aligned on a byte boundary).
	 */
	if (oob_required)
		memcpy(oob, tmp_buf, nfc_geo->metadata_size);

	oob_bit_off = nfc_geo->metadata_size * 8;
	src_bit_off = oob_bit_off;

	/* Extract interleaved payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(buf, step * eccsize * 8,
				       tmp_buf, src_bit_off,
				       eccsize * 8);
		src_bit_off += eccsize * 8;

		/* Pad the last ECC block out to a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(oob, oob_bit_off,
				       tmp_buf, src_bit_off,
				       eccbits);

		src_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	if (oob_required) {
		oob_byte_off = oob_bit_off / 8;

		if (oob_byte_off < mtd->oobsize)
			memcpy(oob + oob_byte_off,
			       tmp_buf + mtd->writesize + oob_byte_off,
			       mtd->oobsize - oob_byte_off);
	}

	return 0;
}

/*
 * This function writes a NAND page without involving the ECC engine (no HW
 * ECC generation).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and does not align data chunks on
 * byte boundaries.
 * We thus need to take care moving the OOB area to the right place in the
 * final page, which is why we're using gpmi_copy_bits.
 *
 * See the set_geometry_by_ecc_info inline comments for a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
				   struct nand_chip *chip,
				   const uint8_t *buf,
				   int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	uint8_t *oob = chip->oob_poi;
	size_t dst_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	int step;

	/*
	 * Initialize all bits to 1 in case we don't have a buffer for the
	 * payload or oob data, in order to leave unspecified bits of data
	 * in their initial state.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);

	/*
	 * First copy the metadata section (stored in the oob buffer) to the
	 * beginning of the page, as imposed by the GPMI layout.
	 */
	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
	oob_bit_off = nfc_geo->metadata_size * 8;
	dst_bit_off = oob_bit_off;

	/* Interleave payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       buf, step * eccsize * 8, eccsize * 8);
		dst_bit_off += eccsize * 8;

		/* Pad the last ECC block out to a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       oob, oob_bit_off, eccbits);

		dst_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	if (oob_required && oob_byte_off < mtd->oobsize)
		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);

	/*
	 * If required, swap the bad block marker and the first byte of the
	 * metadata section, so that we don't modify the bad block marker.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	return nand_prog_page_op(chip, page, 0, tmp_buf,
				 mtd->writesize + mtd->oobsize);
}

static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
}

static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				  int page)
{
	return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}

static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret = 0;
	uint8_t *block_mark;
	int column, page, chipnr;

	chipnr = (int)(ofs >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;

	/* Write the block mark. */
	block_mark = this->data_buffer_dma;
	block_mark[0] = 0; /* bad block marker */

	/* Shift to get page */
	page = (int)(ofs >> chip->page_shift);

	ret = nand_prog_page_op(chip, page, column, block_mark, 1);

	chip->select_chip(mtd, -1);

	return ret;
}

static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *geometry = &this->rom_geometry;

	/*
	 * Set the boot block stride size.
	 *
	 * In principle, we should be reading this from the OTP bits, since
	 * that's where the ROM is going to get it. In fact, we don't have any
	 * way to read the OTP bits, so we go with the default and hope for the
	 * best.
	 */
	geometry->stride_size_in_pages = 64;

	/*
	 * Set the search area stride exponent.
	 *
	 * In principle, we should be reading this from the OTP bits, since
	 * that's where the ROM is going to get it. In fact, we don't have any
	 * way to read the OTP bits, so we go with the default and hope for the
	 * best.
	 */
	geometry->search_area_stride_exponent = 2;
	return 0;
}

static const char *fingerprint = "STMP";
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->data_buf;
	int saved_chip_number;
	int found_an_ncb_fingerprint = false;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/*
	 * Loop through the first search area, looking for the NCB fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read the NCB fingerprint. The fingerprint is four bytes long
		 * and starts in the 12th byte of the page.
		 */
		nand_read_page_op(chip, page, 12, NULL, 0);
		chip->read_buf(mtd, buffer, strlen(fingerprint));

		/* Look for the fingerprint. */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	chip->select_chip(mtd, saved_chip_number);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}
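/*
 * Illustrative note (not part of the original driver): with the defaults set
 * in nand_boot_set_geometry() (stride_size_in_pages = 64 and
 * search_area_stride_exponent = 2), the search area spans 1 << 2 = 4 strides,
 * so the stamp check above and the stamp write below touch pages 0, 64, 128
 * and 192, reading (or writing) the "STMP" fingerprint at byte offset 12.
 */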
/* Writes a transcription stamp. */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->data_buf;
	int saved_chip_number;
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);

	/* Select chip 0. */
	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		status = nand_erase_op(chip, block);
		if (status)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);

		status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
		if (status)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	/* Deselect chip 0. */
	chip->select_chip(mtd, saved_chip_number);
	return 0;
}

static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int chipnr;
	int page;
	loff_t byte;
	uint8_t block_mark;
	int ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = chip->chipsize >> chip->phys_erase_shift;

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block << chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		chip->select_chip(mtd, chipnr);
		nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
		block_mark = chip->read_byte(mtd);
		chip->select_chip(mtd, -1);

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->block_markbad(mtd, byte);
			if (ret)
				dev_err(dev,
					"Failed to mark block bad with ret %d\n",
					ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}

static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* This is ROM arch-specific initialization before the BBT scanning. */
	if (GPMI_IS_MX23(this))
		return mx23_boot_init(this);
	return 0;
}

static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
	int ret;

	/* Free the temporary DMA memory for reading ID. */
	gpmi_free_dma_buffer(this);

	/* Set up the NFC geometry which is used by BCH. */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
		return ret;
	}

	/* Alloc the new DMA buffers according to the pagesize and oobsize */
	return gpmi_alloc_dma_buffer(this);
}

static int gpmi_init_last(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	int ret;

	/* Set up the medium geometry */
	ret = gpmi_set_geometry(this);
	if (ret)
		return ret;

	/* Init the nand_ecc_ctrl{} */
	ecc->read_page	= gpmi_ecc_read_page;
	ecc->write_page	= gpmi_ecc_write_page;
	ecc->read_oob	= gpmi_ecc_read_oob;
	ecc->write_oob	= gpmi_ecc_write_oob;
	ecc->read_page_raw = gpmi_ecc_read_page_raw;
	ecc->write_page_raw = gpmi_ecc_write_page_raw;
	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
	ecc->mode	= NAND_ECC_HW;
	ecc->size	= bch_geo->ecc_chunk_size;
	ecc->strength	= bch_geo->ecc_strength;
	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);

	/*
	 * We only enable the subpage read when:
	 *  (1) the chip is imx6, and
	 *  (2) the size of the ECC parity is byte aligned.
	 */
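	/*
	 * Illustrative note (not part of the original driver): with GF13 and
	 * an ECC strength of 8, the parity per chunk is 13 * 8 = 104 bits,
	 * which is byte aligned, so subpage reads would be enabled on i.MX6;
	 * with a strength of 10, 13 * 10 = 130 bits is not byte aligned, so
	 * they would stay disabled.
	 */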
1858 */ 1859 if (GPMI_IS_MX6(this) && 1860 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) { 1861 ecc->read_subpage = gpmi_ecc_read_subpage; 1862 chip->options |= NAND_SUBPAGE_READ; 1863 } 1864 1865 return 0; 1866 } 1867 1868 static int gpmi_nand_attach_chip(struct nand_chip *chip) 1869 { 1870 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1871 int ret; 1872 1873 if (chip->bbt_options & NAND_BBT_USE_FLASH) { 1874 chip->bbt_options |= NAND_BBT_NO_OOB; 1875 1876 if (of_property_read_bool(this->dev->of_node, 1877 "fsl,no-blockmark-swap")) 1878 this->swap_block_mark = false; 1879 } 1880 dev_dbg(this->dev, "Blockmark swapping %sabled\n", 1881 this->swap_block_mark ? "en" : "dis"); 1882 1883 ret = gpmi_init_last(this); 1884 if (ret) 1885 return ret; 1886 1887 chip->options |= NAND_SKIP_BBTSCAN; 1888 1889 return 0; 1890 } 1891 1892 static const struct nand_controller_ops gpmi_nand_controller_ops = { 1893 .attach_chip = gpmi_nand_attach_chip, 1894 }; 1895 1896 static int gpmi_nand_init(struct gpmi_nand_data *this) 1897 { 1898 struct nand_chip *chip = &this->nand; 1899 struct mtd_info *mtd = nand_to_mtd(chip); 1900 int ret; 1901 1902 /* init current chip */ 1903 this->current_chip = -1; 1904 1905 /* init the MTD data structures */ 1906 mtd->name = "gpmi-nand"; 1907 mtd->dev.parent = this->dev; 1908 1909 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */ 1910 nand_set_controller_data(chip, this); 1911 nand_set_flash_node(chip, this->pdev->dev.of_node); 1912 chip->select_chip = gpmi_select_chip; 1913 chip->setup_data_interface = gpmi_setup_data_interface; 1914 chip->cmd_ctrl = gpmi_cmd_ctrl; 1915 chip->dev_ready = gpmi_dev_ready; 1916 chip->read_byte = gpmi_read_byte; 1917 chip->read_buf = gpmi_read_buf; 1918 chip->write_buf = gpmi_write_buf; 1919 chip->badblock_pattern = &gpmi_bbt_descr; 1920 chip->block_markbad = gpmi_block_markbad; 1921 chip->options |= NAND_NO_SUBPAGE_WRITE; 1922 1923 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ 1924 this->swap_block_mark = !GPMI_IS_MX23(this); 1925 1926 /* 1927 * Allocate a temporary DMA buffer for reading ID in the 1928 * nand_scan_ident(). 1929 */ 1930 this->bch_geometry.payload_size = 1024; 1931 this->bch_geometry.auxiliary_size = 128; 1932 ret = gpmi_alloc_dma_buffer(this); 1933 if (ret) 1934 goto err_out; 1935 1936 chip->dummy_controller.ops = &gpmi_nand_controller_ops; 1937 ret = nand_scan(mtd, GPMI_IS_MX6(this) ? 
	ret = nand_scan(mtd, GPMI_IS_MX6(this) ? 2 : 1);
	if (ret)
		goto err_out;

	ret = nand_boot_init(this);
	if (ret)
		goto err_nand_cleanup;
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nand_cleanup;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_nand_cleanup;
	return 0;

err_nand_cleanup:
	nand_cleanup(chip);
err_out:
	gpmi_free_dma_buffer(this);
	return ret;
}

static const struct of_device_id gpmi_nand_id_table[] = {
	{
		.compatible = "fsl,imx23-gpmi-nand",
		.data = &gpmi_devdata_imx23,
	}, {
		.compatible = "fsl,imx28-gpmi-nand",
		.data = &gpmi_devdata_imx28,
	}, {
		.compatible = "fsl,imx6q-gpmi-nand",
		.data = &gpmi_devdata_imx6q,
	}, {
		.compatible = "fsl,imx6sx-gpmi-nand",
		.data = &gpmi_devdata_imx6sx,
	}, {
		.compatible = "fsl,imx7d-gpmi-nand",
		.data = &gpmi_devdata_imx7d,
	}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);

static int gpmi_nand_probe(struct platform_device *pdev)
{
	struct gpmi_nand_data *this;
	const struct of_device_id *of_id;
	int ret;

	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
	if (of_id) {
		this->devdata = of_id->data;
	} else {
		dev_err(&pdev->dev, "Failed to find the right device id.\n");
		return -ENODEV;
	}

	platform_set_drvdata(pdev, this);
	this->pdev = pdev;
	this->dev = &pdev->dev;

	ret = acquire_resources(this);
	if (ret)
		goto exit_acquire_resources;

	ret = gpmi_init(this);
	if (ret)
		goto exit_nfc_init;

	ret = gpmi_nand_init(this);
	if (ret)
		goto exit_nfc_init;

	dev_info(this->dev, "driver registered.\n");

	return 0;

exit_nfc_init:
	release_resources(this);
exit_acquire_resources:

	return ret;
}

static int gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);

	nand_release(nand_to_mtd(&this->nand));
	gpmi_free_dma_buffer(this);
	release_resources(this);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int gpmi_pm_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	release_dma_channels(this);
	return 0;
}

static int gpmi_pm_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);
	int ret;

	ret = acquire_dma_channels(this);
	if (ret < 0)
		return ret;

	/* re-init the GPMI registers */
	ret = gpmi_init(this);
	if (ret) {
		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
		return ret;
	}

	/* re-init the BCH registers */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH : %d\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops gpmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
};

static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.pm = &gpmi_pm_ops,
		.of_match_table = gpmi_nand_id_table,
	},
	.probe = gpmi_nand_probe,
	.remove = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
MODULE_LICENSE("GPL");
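/*
 * Binding sketch (illustrative only, not copied from a real board file):
 * the driver binds purely through the compatible strings listed in
 * gpmi_nand_id_table, for example a node along the lines of
 *
 *	nand-controller {
 *		compatible = "fsl,imx6q-gpmi-nand";
 *		status = "okay";
 *		...	(reg, interrupts, clocks and pinctrl are SoC/board
 *			 specific and omitted here)
 *	};
 *
 * would make gpmi_nand_probe() run with gpmi_devdata_imx6q as the
 * per-SoC configuration data.
 */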