// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
	struct nand_controller	controller;
	struct nand_chip	chip;

	struct platform_device	*pdev;

	bool			is_readmode;

	void __iomem		*base;
	void __iomem		*vaddr;

	void __iomem		*current_cs;

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;

	struct davinci_aemif_timing	*timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}

/*----------------------------------------------------------------------*/

/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(nand_to_mtd(chip));

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(nand_to_mtd(chip));

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
static int nand_davinci_calculate_1bit(struct nand_chip *chip,
				       const u_char *dat, u_char *ecc_code)
{
	unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	ecc24 = ~ecc24;
	ecc_code[0] = (u_char)(ecc24);
	ecc_code[1] = (u_char)(ecc24 >> 8);
	ecc_code[2] = (u_char)(ecc24 >> 16);

	return 0;
}

static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					 (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					 (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable error */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				return -EBADMSG;
			}
		} else if (!(diff & (diff - 1))) {
			/* Single bit ECC error in the ECC itself,
			 * nothing to fix */
			return 1;
		} else {
			/* Uncorrectable error */
			return -EBADMSG;
		}

	}
	return 0;
}

/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
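 *
 * In outline (as implemented below, not a hardware requirement): hwctl()
 * arms the engine for a 512-byte chunk; on writes, calculate() packs the
 * eight 10-bit parity values into ten ECC bytes stored in the OOB; on
 * reads, calculate() merely terminates the calculation and correct()
 * loads the stored ECC bytes back, reads the syndrome, and fixes up to
 * four erroneous bits per chunk.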
 */

static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned long flags;
	u32 val;

	/* Reset ECC hardware */
	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct nand_chip *chip,
				       const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register.  ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes.  The
	 * ROM boot loader uses this same packing scheme.
	 */
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ = p[0] & 0xff;
		*ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
		*ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ = (p[1] >> 18) & 0xff;
	}

	return 0;
}

/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
				     u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10-bit values.  We know we're
	 * little-endian, and use type punning for less shifting/masking.
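	 * (Seen as a bit stream, the ten packed bytes hold the eight 10-bit
	 * parity values back to back, least significant bit first: value N
	 * occupies bits [10*N + 9 : 10*N] of the 80-bit stream, the inverse
	 * of the packing done in nand_davinci_calculate_4bit().)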
	 */
	if (WARN_ON(0x01 & (uintptr_t)ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
	ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] = (ecc16[4] >> 6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit.  So if you immediately
	 * begin trying to poll for the state, you may fall right out of your
	 * loop without any of the correction calculations having taken place.
	 * The recommendation from the hardware team is to initially delay as
	 * long as ECC_STATE reads less than 4.  After that, ECC HW has entered
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}

/**
 * nand_davinci_read_page_hwecc_oob_first - hw ecc, read oob first
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, which requires the OOB to be read
 * first.  For this ECC mode, the write_page method is re-used from ECC_HW.
 * These methods read/write ECC from the OOB area, unlike the
 * ECC_HW_SYNDROME support with multiple ECC steps, which follows the
 * "infix ECC" scheme and reads/writes ECC within the data area,
 * overwriting the NAND manufacturer bad block markings.
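 *
 * Returns the maximum number of corrected bitflips seen in any single ECC
 * step, or a negative error code if one of the underlying read operations
 * fails.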
 */
static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
						  uint8_t *buf,
						  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
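 *
 * For a 16-byte OOB this places the ECC bytes at offsets 0-4, 6-7 and
 * 13-15, keeps byte 5 for the factory bad block marker, and leaves bytes
 * 8-12 free (enough room for an on-flash BBT marker).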
 */
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	if (section > 2)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else if (section == 1) {
		oobregion->offset = 6;
		oobregion->length = 2;
	} else {
		oobregion->offset = 13;
		oobregion->length = 3;
	}

	return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 8;
		oobregion->length = 5;
	} else {
		oobregion->offset = 16;
		oobregion->length = mtd->oobsize - 16;
	}

	return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata = devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdata->core_chipsel = prop;
		else
			return ERR_PTR(-EINVAL);

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop) && prop == 16)
			pdata->options |= NAND_BUSWIDTH_16;

		if (of_property_read_bool(pdev->dev.of_node,
					  "ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		/*
		 * Since kernel v4.8, this driver has been fixed to enable
		 * use of 4-bit hardware ECC with subpages and verified on
		 * TI's keystone EVMs (K2L, K2HK and K2E).
		 * However, in the interest of not breaking systems using
		 * existing UBI partitions, sub-page writes are not being
		 * (re)enabled.  If you want to use subpage writes on Keystone
		 * platforms (i.e. do not have any existing UBI partitions),
		 * then use "ti,davinci-nand" as the compatible in your
		 * device-tree file.
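		 *
		 * For reference, a minimal node using the properties parsed
		 * above might look like the sketch below (the node name,
		 * "reg" addresses and chipselect value are placeholders,
		 * not taken from any particular board):
		 *
		 *	nand@2000000 {
		 *		compatible = "ti,davinci-nand";
		 *		reg = <0x02000000 0x02000000>,
		 *		      <0x68000000 0x00008000>;
		 *		ti,davinci-chipselect = <1>;
		 *		ti,davinci-mask-ale = <0>;
		 *		ti,davinci-mask-cle = <0>;
		 *		ti,davinci-mask-chipsel = <0>;
		 *		ti,davinci-ecc-mode = "hw";
		 *		ti,davinci-ecc-bits = <4>;
		 *		ti,davinci-nand-use-bbt;
		 *	};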
		 */
		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif

static int davinci_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
	int ret = 0;

	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	switch (info->chip.ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		pdata->ecc_bits = 0;
		/*
		 * This driver expects Hamming based ECC when engine_type is
		 * set to NAND_ECC_ENGINE_TYPE_SOFT.  Force ecc.algo to
		 * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
		 * field to davinci_nand_pdata.
		 */
		info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		if (pdata->ecc_bits == 4) {
			int chunks = mtd->writesize / 512;

			if (!chunks || mtd->oobsize < 16) {
				dev_dbg(&info->pdev->dev, "too small\n");
				return -EINVAL;
			}

			/*
			 * No sanity checks:  CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				return ret;

			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
			info->chip.ecc.correct = nand_davinci_correct_4bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
			info->chip.ecc.bytes = 10;
			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
			info->chip.ecc.algo = NAND_ECC_ALGO_BCH;

			/*
			 * Update ECC layout if needed ... for 1-bit HW ECC, the
			 * default is OK, but it allocates 6 bytes when only 3
			 * are needed (for each 512 bytes).  For 4-bit HW ECC,
			 * the default is not usable:  10 bytes needed, not 6.
			 *
			 * For small page chips, preserve the manufacturer's
			 * badblock marking data ... and make sure a flash BBT
			 * table marker fits in the free bytes.
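			 *
			 * Large page chips (2 KiB and 4 KiB pages, i.e. 4 or
			 * 8 chunks) use the generic large-page layout and the
			 * OOB-first read path; other geometries are rejected
			 * below.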
			 */
			if (chunks == 1) {
				mtd_set_ooblayout(mtd,
						  &hwecc4_small_ooblayout_ops);
			} else if (chunks == 4 || chunks == 8) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
			} else {
				return -EIO;
			}
		} else {
			/* 1bit ecc hamming */
			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
			info->chip.ecc.correct = nand_davinci_correct_1bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
			info->chip.ecc.bytes = 3;
			info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
		}
		info->chip.ecc.size = 512;
		info->chip.ecc.strength = pdata->ecc_bits;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
				 unsigned int len, bool force_8bit)
{
	u32 alignment = ((uintptr_t)buf | len) & 3;

	if (force_8bit || (alignment & 1))
		ioread8_rep(info->current_cs, buf, len);
	else if (alignment & 3)
		ioread16_rep(info->current_cs, buf, len >> 1);
	else
		ioread32_rep(info->current_cs, buf, len >> 2);
}

static void nand_davinci_data_out(struct davinci_nand_info *info,
				  const void *buf, unsigned int len,
				  bool force_8bit)
{
	u32 alignment = ((uintptr_t)buf | len) & 3;

	if (force_8bit || (alignment & 1))
		iowrite8_rep(info->current_cs, buf, len);
	else if (alignment & 3)
		iowrite16_rep(info->current_cs, buf, len >> 1);
	else
		iowrite32_rep(info->current_cs, buf, len >> 2);
}

static int davinci_nand_exec_instr(struct davinci_nand_info *info,
				   const struct nand_op_instr *instr)
{
	unsigned int i, timeout_us;
	u32 status;
	int ret;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		iowrite8(instr->ctx.cmd.opcode,
			 info->current_cs + info->mask_cle);
		break;

	case NAND_OP_ADDR_INSTR:
		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
			iowrite8(instr->ctx.addr.addrs[i],
				 info->current_cs + info->mask_ale);
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
		nand_davinci_data_in(info, instr->ctx.data.buf.in,
				     instr->ctx.data.len,
				     instr->ctx.data.force_8bit);
		break;

	case NAND_OP_DATA_OUT_INSTR:
		nand_davinci_data_out(info, instr->ctx.data.buf.out,
				      instr->ctx.data.len,
				      instr->ctx.data.force_8bit);
		break;

	case NAND_OP_WAITRDY_INSTR:
		timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
		ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
						 status, status & BIT(0), 100,
						 timeout_us);
		if (ret)
			return ret;

		break;
	}

	if (instr->delay_ns)
		ndelay(instr->delay_ns);

	return 0;
}

static int davinci_nand_exec_op(struct nand_chip *chip,
				const struct nand_operation *op,
				bool check_only)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned int i;

	if (check_only)
		return 0;

	info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);

	for (i = 0; i < op->ninstrs; i++) {
		int ret;

		ret = davinci_nand_exec_instr(info, &op->instrs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_controller_ops davinci_nand_controller_ops = {
	.attach_chip = davinci_nand_attach_chip,
	.exec_op = davinci_nand_exec_op,
};

static int nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata *pdata;
	struct davinci_nand_info *info;
	struct resource *res1;
	struct resource *res2;
	void __iomem *vaddr;
	void __iomem *base;
	int ret;
	uint32_t val;
	struct mtd_info *mtd;

	pdata = nand_davinci_get_pdata(pdev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdata->core_chipsel < 0 || pdata->core_chipsel > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		return -EINVAL;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/*
	 * This register range is used to set up the NAND controller.  When
	 * the TI AEMIF driver is in use, AEMIF has already requested the
	 * same memory region, so we cannot request it twice and only ioremap
	 * it.  The AEMIF and NAND drivers do not use the same registers
	 * within this range.
	 */
	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
		return -EADDRNOTAVAIL;
	}

	info->pdev = pdev;
	info->base = base;
	info->vaddr = vaddr;

	mtd = nand_to_mtd(&info->chip);
	mtd->dev.parent = &pdev->dev;
	nand_set_flash_node(&info->chip, pdev->dev.of_node);

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options = pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options = pdata->options;
	info->chip.bbt_td = pdata->bbt_td;
	info->chip.bbt_md = pdata->bbt_md;
	info->timing = pdata->timing;

	info->current_cs = info->vaddr;
	info->core_chipsel = pdata->core_chipsel;
	info->mask_chipsel = pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale = pdata->mask_ale ? : MASK_ALE;
	info->mask_cle = pdata->mask_cle ? : MASK_CLE;

	/* Use board-specific ECC config */
	info->chip.ecc.engine_type = pdata->engine_type;
	info->chip.ecc.placement = pdata->ecc_placement;

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	nand_controller_init(&info->controller);
	info->controller.ops = &davinci_nand_controller_ops;
	info->chip.controller = &info->controller;
	ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		return ret;
	}

	if (pdata->parts)
		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret < 0)
		goto err_cleanup_nand;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
		 (val >> 8) & 0xff, val & 0xff);

	return 0;

err_cleanup_nand:
	nand_cleanup(&info->chip);

	return ret;
}

static int nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);
	struct nand_chip *chip = &info->chip;
	int ret;

	spin_lock_irq(&davinci_nand_lock);
	if (info->chip.ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	return 0;
}

static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove		= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");