// SPDX-License-Identifier: GPL-2.0
/*
 * ARM PL35X NAND flash controller driver
 *
 * Copyright (C) 2017 Xilinx, Inc
 * Author:
 *   Miquel Raynal <miquel.raynal@bootlin.com>
 * Original work (rewritten):
 *   Punnaiah Choudary Kalluri <punnaia@xilinx.com>
 *   Naga Sureshkumar Relli <nagasure@xilinx.com>
 */

#include <linux/amba/bus.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>

#define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller"

/* SMC controller status register (RO) */
#define PL35X_SMC_MEMC_STATUS 0x0
#define PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1 BIT(6)
/* SMC clear config register (WO) */
#define PL35X_SMC_MEMC_CFG_CLR 0xC
#define PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1 BIT(1)
#define PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 BIT(4)
#define PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 BIT(6)
/* SMC direct command register (WO) */
#define PL35X_SMC_DIRECT_CMD 0x10
#define PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23)
#define PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21)
/* SMC set cycles register (WO) */
#define PL35X_SMC_CYCLES 0x14
#define PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0)
#define PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4)
#define PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8)
#define PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11)
#define PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14)
#define PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17)
#define PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20)
/* SMC set opmode register (WO) */
#define PL35X_SMC_OPMODE 0x18
#define PL35X_SMC_OPMODE_BW_8 0
#define PL35X_SMC_OPMODE_BW_16 1
/* SMC ECC status register (RO) */
#define PL35X_SMC_ECC_STATUS 0x400
#define PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6)
/* SMC ECC configuration register */
#define PL35X_SMC_ECC_CFG 0x404
#define PL35X_SMC_ECC_CFG_MODE_MASK 0xC
#define PL35X_SMC_ECC_CFG_MODE_BYPASS 0
#define PL35X_SMC_ECC_CFG_MODE_APB BIT(2)
#define PL35X_SMC_ECC_CFG_MODE_MEM BIT(3)
#define PL35X_SMC_ECC_CFG_PGSIZE_MASK 0x3
/* SMC ECC command 1 register */
#define PL35X_SMC_ECC_CMD1 0x408
#define PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0)
#define PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8)
#define PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16)
#define PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24)
/* SMC ECC command 2 register */
#define PL35X_SMC_ECC_CMD2 0x40C
#define PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24)
/* SMC ECC value registers (RO) */
#define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x)))
#define PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27))
#define PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28))
#define PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30))

/* NAND AXI interface */
#define PL35X_SMC_CMD_PHASE 0
#define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3)
#define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11)
#define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20)
#define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos)))
#define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21)
#define PL35X_SMC_DATA_PHASE BIT(19)
#define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10)
#define PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21)

#define PL35X_NAND_MAX_CS 1
#define PL35X_NAND_LAST_XFER_SZ 4
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns))
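
/*
 * Worked example (illustrative numbers only): with a 100 MHz memclk the
 * period is 10 ns, so a tWC_min of 25000 ps converts to
 * TO_CYCLES(25000, 10) = DIV_ROUND_UP(25, 10) = 3 clock cycles.
 */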

#define PL35X_NAND_ECC_BITS_MASK 0xFFF
#define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF
#define PL35X_NAND_ECC_BIT_OFF_MASK 0x7
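
/*
 * These bitfields mirror the layout programmed into the SMC "set
 * cycles" register through the PL35X_SMC_NAND_*_CYCLES() macros above:
 * for instance, t_wc lands in bits [7:4] and t_rr in bits [23:20].
 */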
struct pl35x_nand_timings {
	unsigned int t_rc:4;
	unsigned int t_wc:4;
	unsigned int t_rea:3;
	unsigned int t_wp:3;
	unsigned int t_clr:3;
	unsigned int t_ar:3;
	unsigned int t_rr:4;
	unsigned int rsvd:8;
};

struct pl35x_nand {
	struct list_head node;
	struct nand_chip chip;
	unsigned int cs;
	unsigned int addr_cycles;
	u32 ecc_cfg;
	u32 timings;
};

/**
 * struct pl35x_nandc - NAND flash controller driver structure
 * @dev: Kernel device
 * @conf_regs: SMC configuration registers for command phase
 * @io_regs: NAND data registers for data phase
 * @controller: Core NAND controller structure
 * @chips: List of NAND chips connected to the controller
 * @selected_chip: NAND chip currently selected by the controller
 * @assigned_cs: Bitmap of the assigned CS
 * @ecc_buf: Temporary buffer to extract ECC bytes
 */
struct pl35x_nandc {
	struct device *dev;
	void __iomem *conf_regs;
	void __iomem *io_regs;
	struct nand_controller controller;
	struct list_head chips;
	struct nand_chip *selected_chip;
	unsigned long assigned_cs;
	u8 *ecc_buf;
};

static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct pl35x_nandc, controller);
}

static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip)
{
	return container_of(chip, struct pl35x_nand, chip);
}

static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * chip->ecc.bytes);
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * chip->ecc.bytes) + 8;
	oobregion->length = 8;

	return 0;
}

static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
	.ecc = pl35x_ecc_ooblayout16_ecc,
	.free = pl35x_ecc_ooblayout16_free,
};

/* Generic flash BBT descriptors */
static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = mirror_pattern
};

static void pl35x_smc_update_regs(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_DIRECT_CMD_NAND_CS |
	       PL35X_SMC_DIRECT_CMD_UPD_REGS,
	       nfc->conf_regs + PL35X_SMC_DIRECT_CMD);
}

static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw)
{
	if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16)
		return -EINVAL;

	writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE);
	pl35x_smc_update_regs(nfc);

	return 0;
}

static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
}

static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg,
				 reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1,
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on NAND controller interrupt (0x%x)\n",
			reg);

	pl35x_smc_clear_irq(nfc);

	return ret;
}

static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg,
				 !(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY),
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on ECC controller interrupt\n");

	return ret;
}

static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc,
				  struct nand_chip *chip,
				  unsigned int mode)
{
	struct pl35x_nand *plnand;
	u32 ecc_cfg;

	ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
	ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK;
	ecc_cfg |= mode;
	writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	if (chip) {
		plnand = to_pl35x_nand(chip);
		plnand->ecc_cfg = ecc_cfg;
	}

	if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS)
		return pl35x_smc_wait_for_ecc_done(nfc);

	return 0;
}

static void pl35x_smc_force_byte_access(struct nand_chip *chip,
					bool force_8bit)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	int ret;

	if (!(chip->options & NAND_BUSWIDTH_16))
		return;

	if (force_8bit)
		ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
	else
		ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16);

	if (ret)
		dev_err(nfc->dev, "Failed to change the bus width\n");
}

static void pl35x_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);

	if (chip == nfc->selected_chip)
		return;

	/* Setup the timings */
	writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
	pl35x_smc_update_regs(nfc);

	/* Configure the ECC engine */
	writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	nfc->selected_chip = chip;
}
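
/*
 * Data phase accesses are performed 32 bits at a time and the "flags"
 * (e.g. ECC_LAST or CLEAR_CS) are encoded in the AXI address of the
 * access; only the very last word of a transfer may carry them.
 * Trailing unaligned bytes fall back to byte accesses without flags.
 */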
static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in,
				    unsigned int len, bool force_8bit,
				    unsigned int flags, unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	unsigned int data_phase_addr;
	u32 *buf32 = (u32 *)in;
	u8 *buf8 = (u8 *)in;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		buf32[i] = readl(nfc->io_regs + data_phase_addr);
	}

	/* Extra flags are not supported on unaligned (byte) accesses */
	for (i = in_start; i < len; i++)
		buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}

static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out,
				     int len, bool force_8bit,
				     unsigned int flags,
				     unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	const u32 *buf32 = (const u32 *)out;
	const u8 *buf8 = (const u8 *)out;
	unsigned int data_phase_addr;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		writel(buf32[i], nfc->io_regs + data_phase_addr);
	}

	/* Extra flags are not supported on unaligned (byte) accesses */
	for (i = in_start; i < len; i++)
		writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}
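
/*
 * The ECC engine produces a 24-bit code per 512 byte chunk, handled
 * here as two 12-bit halves. XOR-ing the stored and recalculated codes
 * gives a syndrome: if one half is the bitwise complement of the other,
 * a single data bit flipped and the syndrome encodes its position (byte
 * offset in bits [11:3], bit offset in bits [2:0]); a syndrome with a
 * single bit set means the error was in the ECC bytes themselves.
 */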
static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf,
				   unsigned char *read_ecc,
				   unsigned char *calc_ecc)
{
	unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
	unsigned short calc_ecc_lower, calc_ecc_upper;
	unsigned short byte_addr, bit_addr;

	read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
			 PL35X_NAND_ECC_BITS_MASK;
	read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
			 PL35X_NAND_ECC_BITS_MASK;

	calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
			 PL35X_NAND_ECC_BITS_MASK;
	calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
			 PL35X_NAND_ECC_BITS_MASK;

	ecc_odd = read_ecc_lower ^ calc_ecc_lower;
	ecc_even = read_ecc_upper ^ calc_ecc_upper;

	/* No error */
	if (likely(!ecc_odd && !ecc_even))
		return 0;

	/* One error in the main data; to be corrected */
	if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) {
		/* Bits [11:3] of error code give the byte offset */
		byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK;
		/* Bits [2:0] of error code give the bit offset */
		bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK;
		/* Toggle the faulty bit */
		buf[byte_addr] ^= BIT(bit_addr);

		return 1;
	}

	/* One error in the ECC data; no action needed */
	if (hweight32(ecc_odd | ecc_even) == 1)
		return 1;

	return -EBADMSG;
}

static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg,
					u8 *ecc_array)
{
	u32 ecc_value = ~ecc_reg;
	unsigned int ecc_byte;

	for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++)
		ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte);
}

static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc,
				    struct nand_chip *chip, u8 *read_ecc)
{
	u32 ecc_value;
	int chunk;

	for (chunk = 0; chunk < chip->ecc.steps;
	     chunk++, read_ecc += chip->ecc.bytes) {
		ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
		if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
			return -EINVAL;

		pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc);
	}

	return 0;
}

static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc,
					 struct nand_chip *chip, u8 *data,
					 u8 *read_ecc)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0, chunk;
	u8 calc_ecc[3];
	u32 ecc_value;
	int stats;

	for (chunk = 0; chunk < chip->ecc.steps;
	     chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) {
		/* Read ECC value for each chunk */
		ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));

		if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
			return -EINVAL;

		if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) {
			mtd->ecc_stats.failed++;
			continue;
		}

		pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc);
		stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc);
		if (stats < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stats;
			max_bitflips = max_t(unsigned int, max_bitflips, stats);
		}
	}

	return max_bitflips;
}
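
/*
 * Command and address cycles are emitted by writing to a computed AXI
 * offset: the opcode(s) and the number of address cycles are encoded in
 * the offset itself (CMD_PHASE bits), while the address bytes form the
 * 32-bit data of the access, up to four of them per write, hence the
 * addr1/addr2 split below.
 */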
static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
				       const u8 *buf, int oob_required,
				       int page)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
	unsigned int nrows = plnand->addr_cycles;
	u32 addr1 = 0, addr2 = 0, row;
	u32 cmd_addr;
	int i, ret;
	u8 status;

	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
	if (ret)
		return ret;

	cmd_addr = PL35X_SMC_CMD_PHASE |
		   PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
		   PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN);

	for (i = 0, row = first_row; row < nrows; i++, row++) {
		u8 addr = page >> ((i * 8) & 0xFF);

		if (row < 4)
			addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
		else
			addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
	}

	/* Send the command and address cycles */
	writel(addr1, nfc->io_regs + cmd_addr);
	if (plnand->addr_cycles > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Write the data with the engine enabled */
	pl35x_nand_write_data_op(chip, buf, mtd->writesize, false,
				 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
	ret = pl35x_smc_wait_for_ecc_done(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Copy the HW calculated ECC bytes in the OOB buffer */
	ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf);
	if (ret)
		goto disable_ecc_engine;

	if (!oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi,
					 0, chip->ecc.total);
	if (ret)
		goto disable_ecc_engine;

	/* Write the spare area with ECC bytes */
	pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0,
				 PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) |
				 PL35X_SMC_CMD_PHASE_CMD1_VALID |
				 PL35X_SMC_DATA_PHASE_CLEAR_CS);
	ret = pl35x_smc_wait_for_irq(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Check write status on the chip side */
	ret = nand_status_op(chip, &status);
	if (ret)
		goto disable_ecc_engine;

	if (status & NAND_STATUS_FAIL)
		ret = -EIO;

disable_ecc_engine:
	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	return ret;
}

/*
 * This function reads data and checks their integrity by comparing the
 * hardware generated ECC values with the ECC values read from the spare area.
 *
 * There is a limitation with the SMC controller: ECC_LAST must be set on the
 * last data access to tell the ECC engine not to expect any further data.
 * In practice, this means shrinking the last data transfer by e.g. 4 bytes,
 * and doing a final 4-byte transfer with the additional bit set. The last
 * block should be aligned with the end of an ECC block. Because of this
 * limitation, it is not possible to use the core routines.
 */
static int pl35x_nand_read_page_hwecc(struct nand_chip *chip,
				      u8 *buf, int oob_required, int page)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
	unsigned int nrows = plnand->addr_cycles;
	unsigned int addr1 = 0, addr2 = 0, row;
	u32 cmd_addr;
	int i, ret;

	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
	if (ret)
		return ret;

	cmd_addr = PL35X_SMC_CMD_PHASE |
		   PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
		   PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) |
		   PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) |
		   PL35X_SMC_CMD_PHASE_CMD1_VALID;

	for (i = 0, row = first_row; row < nrows; i++, row++) {
		u8 addr = page >> ((i * 8) & 0xFF);

		if (row < 4)
			addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
		else
			addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
	}

	/* Send the command and address cycles */
	writel(addr1, nfc->io_regs + cmd_addr);
	if (plnand->addr_cycles > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Wait for the data to be available in the NAND cache */
	ndelay(PSEC_TO_NSEC(sdr->tRR_min));
	ret = pl35x_smc_wait_for_irq(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Retrieve the raw data with the engine enabled */
	pl35x_nand_read_data_op(chip, buf, mtd->writesize, false,
				0, PL35X_SMC_DATA_PHASE_ECC_LAST);
	ret = pl35x_smc_wait_for_ecc_done(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Retrieve the stored ECC bytes */
	pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
	ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		goto disable_ecc_engine;

	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	/* Correct the data and report failures */
	return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf);

disable_ecc_engine:
	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	return ret;
}
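
/*
 * Generic exec_op() helper: collect the opcode(s), address cycles, data
 * and waitrdy instructions of the subop, then issue a single command
 * phase followed by an optional data phase, which is the sequence the
 * controller expects.
 */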
static int pl35x_nand_exec_op(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	const struct nand_op_instr *instr, *data_instr = NULL;
	unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0;
	u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0;
	unsigned int op_id, len, offset, rdy_del_ns;
	int last_instr_type = -1;
	bool cmd1_valid = false;
	const u8 *addrs;
	int i, ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (!cmds) {
				cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode);
			} else {
				cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode);
				if (last_instr_type != NAND_OP_DATA_OUT_INSTR)
					cmd1_valid = true;
			}
			cmds++;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs);

			for (i = offset; i < naddrs; i++) {
				if (i < 4)
					addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]);
				else
					addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]);
			}
			break;

		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			data_instr = instr;
			len = nand_subop_get_data_len(subop, op_id);
			break;

		case NAND_OP_WAITRDY_INSTR:
			rdy_tim_ms = instr->ctx.waitrdy.timeout_ms;
			rdy_del_ns = instr->delay_ns;
			break;
		}

		last_instr_type = instr->type;
	}

	/* Command phase */
	cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 |
		    (cmd1_valid ? PL35X_SMC_CMD_PHASE_CMD1_VALID : 0);
	writel(addr1, nfc->io_regs + cmd_addr);
	if (naddrs > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Data phase */
	if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) {
		last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS;
		if (cmds == 2)
			last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID;

		pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out,
					 len, data_instr->ctx.data.force_8bit,
					 0, last_flags);
	}

	if (rdy_tim_ms) {
		ndelay(rdy_del_ns);
		ret = pl35x_smc_wait_for_irq(nfc);
		if (ret)
			return ret;
	}

	if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR)
		pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in,
					len, data_instr->ctx.data.force_8bit,
					0, PL35X_SMC_DATA_PHASE_CLEAR_CS);

	return 0;
}
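
/*
 * Each pattern below accepts up to 7 address cycles and up to 2112
 * bytes of data, which corresponds to a 2 kiB page plus its 64 byte
 * OOB area, the largest page the hardware ECC engine supports.
 */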
static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);

static int pl35x_nfc_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op,
			     bool check_only)
{
	if (!check_only)
		pl35x_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser,
				      op, check_only);
}

static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
				     const struct nand_interface_config *conf)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct pl35x_nand_timings tmgs = {};
	const struct nand_sdr_timings *sdr;
	unsigned int period_ns, val;
	struct clk *mclk;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk");
	if (IS_ERR(mclk)) {
		dev_err(nfc->dev, "Failed to retrieve SMC memclk\n");
		return PTR_ERR(mclk);
	}

	/*
	 * SDR timings are given in picoseconds while NFC timings must be
	 * expressed in NAND controller clock cycles. We use the TO_CYCLES()
	 * macro to convert from one to the other.
	 */
	period_ns = NSEC_PER_SEC / clk_get_rate(mclk);

	/*
	 * PL35X SMC needs one extra read cycle in SDR mode 5. This is not
	 * written anywhere in the datasheet but is an empirical observation.
	 */
	val = TO_CYCLES(sdr->tRC_min, period_ns);
	if (sdr->tRC_min <= 20000)
		val++;

	tmgs.t_rc = val;
	if (tmgs.t_rc != val || tmgs.t_rc < 2)
		return -EINVAL;

	val = TO_CYCLES(sdr->tWC_min, period_ns);
	tmgs.t_wc = val;
	if (tmgs.t_wc != val || tmgs.t_wc < 2)
		return -EINVAL;

	/*
	 * For all SDR modes, PL35X SMC needs tREA_max to be 1; this is also
	 * an empirical result.
	 */
	tmgs.t_rea = 1;

	val = TO_CYCLES(sdr->tWP_min, period_ns);
	tmgs.t_wp = val;
	if (tmgs.t_wp != val || tmgs.t_wp < 1)
		return -EINVAL;

	val = TO_CYCLES(sdr->tCLR_min, period_ns);
	tmgs.t_clr = val;
	if (tmgs.t_clr != val)
		return -EINVAL;

	val = TO_CYCLES(sdr->tAR_min, period_ns);
	tmgs.t_ar = val;
	if (tmgs.t_ar != val)
		return -EINVAL;

	val = TO_CYCLES(sdr->tRR_min, period_ns);
	tmgs.t_rr = val;
	if (tmgs.t_rr != val)
		return -EINVAL;

	if (cs == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) |
			  PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) |
			  PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) |
			  PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) |
			  PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) |
			  PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
			  PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);

	return 0;
}
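
/*
 * The ECC engine only understands 512 byte, 1 kiB and 2 kiB pages,
 * encoded as 1, 2 and 3 respectively in the PGSIZE field; any other
 * page size falls back to the value 0.
 */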
static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc,
				      struct nand_chip *chip,
				      unsigned int pg_sz)
{
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	u32 sz;

	switch (pg_sz) {
	case SZ_512:
		sz = 1;
		break;
	case SZ_1K:
		sz = 2;
		break;
	case SZ_2K:
		sz = 3;
		break;
	default:
		sz = 0;
		break;
	}

	plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
	plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK;
	plnand->ecc_cfg |= sz;
	writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
}

static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc,
					     struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;

	if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) {
		dev_err(nfc->dev,
			"The hardware ECC engine is limited to pages up to 2kiB\n");
		return -EOPNOTSUPP;
	}

	chip->ecc.strength = 1;
	chip->ecc.bytes = 3;
	chip->ecc.size = SZ_512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	chip->ecc.read_page = pl35x_nand_read_page_hwecc;
	chip->ecc.write_page = pl35x_nand_write_page_hwecc;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
	pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize);

	nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps,
				    GFP_KERNEL);
	if (!nfc->ecc_buf)
		return -ENOMEM;

	switch (mtd->oobsize) {
	case 16:
		/* Legacy Xilinx layout */
		mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops);
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
		break;
	case 64:
		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
		break;
	default:
		dev_err(nfc->dev, "Unsupported OOB size\n");
		return -EOPNOTSUPP;
	}

	return ret;
}

static int pl35x_nand_attach_chip(struct nand_chip *chip)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
	    (!chip->ecc.size || !chip->ecc.strength)) {
		if (requirements->step_size && requirements->strength) {
			chip->ecc.size = requirements->step_size;
			chip->ecc.strength = requirements->strength;
		} else {
			dev_info(nfc->dev,
				 "No minimum ECC strength, using 1b/512B\n");
			chip->ecc.size = 512;
			chip->ecc.strength = 1;
		}
	}

	if (mtd->writesize <= SZ_512)
		plnand->addr_cycles = 1;
	else
		plnand->addr_cycles = 2;

	if (chip->options & NAND_ROW_ADDR_3)
		plnand->addr_cycles += 3;
	else
		plnand->addr_cycles += 2;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* Keep these legacy BBT descriptors for ON_DIE situations */
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
		fallthrough;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = pl35x_nand_init_hw_ecc_controller(nfc, chip);
		if (ret)
			return ret;
		break;
	default:
		dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
			chip->ecc.engine_type);
		return -EINVAL;
	}

	return 0;
}

static const struct nand_controller_ops pl35x_nandc_ops = {
	.attach_chip = pl35x_nand_attach_chip,
	.exec_op = pl35x_nfc_exec_op,
	.setup_interface = pl35x_nfc_setup_interface,
};
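
/*
 * Bring the controller into a known state: interrupts disabled and
 * cleared, 8-bit bus width, ECC engine bypassed, and the opcodes that
 * delimit the accesses the ECC block must monitor programmed in.
 */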
static int pl35x_nand_reset_state(struct pl35x_nandc *nfc)
{
	int ret;

	/* Disable interrupts and clear their status */
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 |
	       PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 |
	       PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);

	/* Set default bus width to 8-bit */
	ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
	if (ret)
		return ret;

	/* Ensure the ECC controller is bypassed by default */
	ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS);
	if (ret)
		return ret;

	/*
	 * Configure the commands that the ECC block uses to detect the
	 * operations it should start/end.
	 */
	writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) |
	       PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) |
	       PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) |
	       PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD1);
	writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD2);

	return 0;
}

static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
				struct device_node *np)
{
	struct pl35x_nand *plnand;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	int cs, ret;

	plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL);
	if (!plnand)
		return -ENOMEM;

	ret = of_property_read_u32(np, "reg", &cs);
	if (ret)
		return ret;

	if (cs >= PL35X_NAND_MAX_CS) {
		dev_err(nfc->dev, "Wrong CS %d\n", cs);
		return -EINVAL;
	}

	if (test_and_set_bit(cs, &nfc->assigned_cs)) {
		dev_err(nfc->dev, "Already assigned CS %d\n", cs);
		return -EINVAL;
	}

	plnand->cs = cs;

	chip = &plnand->chip;
	chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
	chip->bbt_options = NAND_BBT_USE_FLASH;
	chip->controller = &nfc->controller;
	mtd = nand_to_mtd(chip);
	mtd->dev.parent = nfc->dev;
	nand_set_flash_node(chip, np);
	if (!mtd->name) {
		mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
					   "%s", PL35X_NANDC_DRIVER_NAME);
		if (!mtd->name) {
			dev_err(nfc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&plnand->node, &nfc->chips);

	return ret;
}

static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
{
	struct pl35x_nand *plnand, *tmp;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
		chip = &plnand->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		list_del(&plnand->node);
	}
}

static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
	struct device_node *np = nfc->dev->of_node, *nand_np;
	int nchips = of_get_child_count(np);
	int ret;

	if (!nchips || nchips > PL35X_NAND_MAX_CS) {
		dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
			nchips);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = pl35x_nand_chip_init(nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			pl35x_nand_chips_cleanup(nfc);
			break;
		}
	}

	return ret;
}
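
/*
 * The configuration registers come from the parent PL35x SMC AMBA
 * device, while the NAND command/data (AXI) window is this platform
 * device's own "reg" entry. A minimal, illustrative device tree
 * fragment (addresses are placeholders, see the arm,pl353-smc binding
 * for the authoritative layout):
 *
 *	smcc: memory-controller@e000e000 {
 *		compatible = "arm,pl353-smc-r2p1", "arm,primecell";
 *		...
 *		nfc0: nand-controller@e1000000 {
 *			compatible = "arm,pl353-nand-r2p1";
 *			reg = <0xe1000000 0x1000000>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			nand@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 */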
static int pl35x_nand_probe(struct platform_device *pdev)
{
	struct device *smc_dev = pdev->dev.parent;
	struct amba_device *smc_amba = to_amba_device(smc_dev);
	struct pl35x_nandc *nfc;
	int ret;

	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = &pdev->dev;
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &pl35x_nandc_ops;
	INIT_LIST_HEAD(&nfc->chips);

	nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res);
	if (IS_ERR(nfc->conf_regs))
		return PTR_ERR(nfc->conf_regs);

	nfc->io_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(nfc->io_regs))
		return PTR_ERR(nfc->io_regs);

	ret = pl35x_nand_reset_state(nfc);
	if (ret)
		return ret;

	ret = pl35x_nand_chips_init(nfc);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, nfc);

	return 0;
}

static void pl35x_nand_remove(struct platform_device *pdev)
{
	struct pl35x_nandc *nfc = platform_get_drvdata(pdev);

	pl35x_nand_chips_cleanup(nfc);
}

static const struct of_device_id pl35x_nand_of_match[] = {
	{ .compatible = "arm,pl353-nand-r2p1" },
	{},
};
MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);

static struct platform_driver pl35x_nandc_driver = {
	.probe = pl35x_nand_probe,
	.remove_new = pl35x_nand_remove,
	.driver = {
		.name = PL35X_NANDC_DRIVER_NAME,
		.of_match_table = pl35x_nand_of_match,
	},
};
module_platform_driver(pl35x_nandc_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
MODULE_LICENSE("GPL");