// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
 *
 * Derived from:
 *	https://github.com/yuq/sunxi-nfc-mtd
 *	Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
 *
 *	https://github.com/hno/Allwinner-Info
 *	Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
 *
 *	Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
 *	Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/reset.h>

#define NFC_REG_CTL		0x0000
#define NFC_REG_ST		0x0004
#define NFC_REG_INT		0x0008
#define NFC_REG_TIMING_CTL	0x000C
#define NFC_REG_TIMING_CFG	0x0010
#define NFC_REG_ADDR_LOW	0x0014
#define NFC_REG_ADDR_HIGH	0x0018
#define NFC_REG_SECTOR_NUM	0x001C
#define NFC_REG_CNT		0x0020
#define NFC_REG_CMD		0x0024
#define NFC_REG_RCMD_SET	0x0028
#define NFC_REG_WCMD_SET	0x002C
#define NFC_REG_A10_IO_DATA	0x0030
#define NFC_REG_A23_IO_DATA	0x0300
#define NFC_REG_ECC_CTL		0x0034
#define NFC_REG_ECC_ST		0x0038
#define NFC_REG_DEBUG		0x003C
#define NFC_REG_ECC_ERR_CNT(x)	((0x0040 + (x)) & ~0x3)
#define NFC_REG_USER_DATA(x)	(0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA	0x00A0
#define NFC_REG_PAT_ID		0x00A4
#define NFC_REG_MDMA_CNT	0x00C4
#define NFC_RAM0_BASE		0x0400
#define NFC_RAM1_BASE		0x0800

/* define bit use in NFC_CTL */
#define NFC_EN			BIT(0)
#define NFC_RESET		BIT(1)
#define NFC_BUS_WIDTH_MSK	BIT(2)
#define NFC_BUS_WIDTH_8		(0 << 2)
#define NFC_BUS_WIDTH_16	(1 << 2)
#define NFC_RB_SEL_MSK		BIT(3)
#define NFC_RB_SEL(x)		((x) << 3)
#define NFC_CE_SEL_MSK		GENMASK(26, 24)
#define NFC_CE_SEL(x)		((x) << 24)
#define NFC_CE_CTL		BIT(6)
#define NFC_PAGE_SHIFT_MSK	GENMASK(11, 8)
#define NFC_PAGE_SHIFT(x)	(((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM			BIT(12)
#define NFC_RAM_METHOD		BIT(14)
#define NFC_DMA_TYPE_NORMAL	BIT(15)
#define NFC_DEBUG_CTL		BIT(31)

/* define bit use in NFC_ST */
#define NFC_RB_B2R		BIT(0)
#define NFC_CMD_INT_FLAG	BIT(1)
#define NFC_DMA_INT_FLAG	BIT(2)
#define NFC_CMD_FIFO_STATUS	BIT(3)
#define NFC_STA			BIT(4)
#define NFC_NATCH_INT_FLAG	BIT(5)
#define NFC_RB_STATE(x)		BIT(x + 8)

/* define bit use in NFC_INT */
#define NFC_B2R_INT_ENABLE	BIT(0)
#define NFC_CMD_INT_ENABLE	BIT(1)
#define NFC_DMA_INT_ENABLE	BIT(2)
#define NFC_INT_MASK		(NFC_B2R_INT_ENABLE | \
				 NFC_CMD_INT_ENABLE | \
				 NFC_DMA_INT_ENABLE)

/* define bit use in NFC_TIMING_CTL */
#define NFC_TIMING_CTL_EDO	BIT(8)

/* define NFC_TIMING_CFG register layout */
#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD)		\
	(((tWB) & 0x3) | (((tADL) & 0x3) << 2) |		\
	(((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) |		\
	(((tCAD) & 0x7) << 8))
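
/*
 * Illustrative packing example (values made up for this comment, not taken
 * from a datasheet): tWB = 1, tADL = 2, tWHR = 1, tRHW = 2, tCAD = 0x7 gives
 * NFC_TIMING_CFG() = 0x1 | (0x2 << 2) | (0x1 << 4) | (0x2 << 6) | (0x7 << 8)
 * = 0x799.
 */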

/* define bit use in NFC_CMD */
#define NFC_CMD_LOW_BYTE_MSK	GENMASK(7, 0)
#define NFC_CMD_HIGH_BYTE_MSK	GENMASK(15, 8)
#define NFC_CMD(x)		(x)
#define NFC_ADR_NUM_MSK		GENMASK(18, 16)
#define NFC_ADR_NUM(x)		(((x) - 1) << 16)
#define NFC_SEND_ADR		BIT(19)
#define NFC_ACCESS_DIR		BIT(20)
#define NFC_DATA_TRANS		BIT(21)
#define NFC_SEND_CMD1		BIT(22)
#define NFC_WAIT_FLAG		BIT(23)
#define NFC_SEND_CMD2		BIT(24)
#define NFC_SEQ			BIT(25)
#define NFC_DATA_SWAP_METHOD	BIT(26)
#define NFC_ROW_AUTO_INC	BIT(27)
#define NFC_SEND_CMD3		BIT(28)
#define NFC_SEND_CMD4		BIT(29)
#define NFC_CMD_TYPE_MSK	GENMASK(31, 30)
#define NFC_NORMAL_OP		(0 << 30)
#define NFC_ECC_OP		(1 << 30)
#define NFC_PAGE_OP		(2U << 30)

/* define bit use in NFC_RCMD_SET */
#define NFC_READ_CMD_MSK	GENMASK(7, 0)
#define NFC_RND_READ_CMD0_MSK	GENMASK(15, 8)
#define NFC_RND_READ_CMD1_MSK	GENMASK(23, 16)

/* define bit use in NFC_WCMD_SET */
#define NFC_PROGRAM_CMD_MSK	GENMASK(7, 0)
#define NFC_RND_WRITE_CMD_MSK	GENMASK(15, 8)
#define NFC_READ_CMD0_MSK	GENMASK(23, 16)
#define NFC_READ_CMD1_MSK	GENMASK(31, 24)

/* define bit use in NFC_ECC_CTL */
#define NFC_ECC_EN		BIT(0)
#define NFC_ECC_PIPELINE	BIT(3)
#define NFC_ECC_EXCEPTION	BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK	BIT(5)
#define NFC_ECC_BLOCK_512	BIT(5)
#define NFC_RANDOM_EN		BIT(9)
#define NFC_RANDOM_DIRECTION	BIT(10)
#define NFC_ECC_MODE_MSK	GENMASK(15, 12)
#define NFC_ECC_MODE(x)		((x) << 12)
#define NFC_RANDOM_SEED_MSK	GENMASK(30, 16)
#define NFC_RANDOM_SEED(x)	((x) << 16)

/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x)		BIT(x)
#define NFC_ECC_ERR_MSK		GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x)	BIT(x + 16)
#define NFC_ECC_ERR_CNT(b, x)	(((x) >> (((b) % 4) * 8)) & 0xff)

#define NFC_DEFAULT_TIMEOUT_MS	1000

#define NFC_SRAM_SIZE		1024

#define NFC_MAX_CS		7

/**
 * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
 *
 * @cs: the NAND CS id used to communicate with a NAND Chip
 * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC
 */
struct sunxi_nand_chip_sel {
	u8 cs;
	s8 rb;
};

/**
 * struct sunxi_nand_hw_ecc - stores information related to HW ECC support
 *
 * @mode: the sunxi ECC mode field deduced from ECC requirements
 */
struct sunxi_nand_hw_ecc {
	int mode;
};

/**
 * struct sunxi_nand_chip - stores NAND chip device related information
 *
 * @node: used to store NAND chips into a list
 * @nand: base NAND chip structure
 * @clk_rate: clk_rate required for this NAND chip
 * @timing_cfg: TIMING_CFG register value for this NAND chip
 * @timing_ctl: TIMING_CTL register value for this NAND chip
 * @nsels: number of CS lines required by the NAND chip
 * @sels: array of CS lines descriptions
 */
struct sunxi_nand_chip {
	struct list_head node;
	struct nand_chip nand;
	unsigned long clk_rate;
	u32 timing_cfg;
	u32 timing_ctl;
	int nsels;
	struct sunxi_nand_chip_sel sels[];
};

static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
{
	return container_of(nand, struct sunxi_nand_chip, nand);
}

/*
 * NAND Controller capabilities structure: stores NAND controller capabilities
 * for distinction between compatible strings.
 *
 * @extra_mbus_conf:	Contrary to A10, A10s and A13, accessing internal RAM
 *			through MBUS on A23/A33 needs extra configuration.
 * @reg_io_data:	I/O data register
 * @dma_maxburst:	DMA maxburst
 */
struct sunxi_nfc_caps {
	bool extra_mbus_conf;
	unsigned int reg_io_data;
	unsigned int dma_maxburst;
};

/**
 * struct sunxi_nfc - stores sunxi NAND controller information
 *
 * @controller: base controller structure
 * @dev: parent device (used to print error messages)
 * @regs: NAND controller registers
 * @ahb_clk: NAND controller AHB clock
 * @mod_clk: NAND controller mod clock
 * @reset: NAND controller reset line
 * @assigned_cs: bitmask describing already assigned CS lines
 * @clk_rate: NAND controller current clock rate
 * @chips: a list containing all the NAND chips attached to this NAND
 *	   controller
 * @complete: a completion object used to wait for NAND controller events
 * @dmac: the DMA channel attached to the NAND controller
 * @caps: NAND controller capabilities matched from the compatible string
 */
struct sunxi_nfc {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *ahb_clk;
	struct clk *mod_clk;
	struct reset_control *reset;
	unsigned long assigned_cs;
	unsigned long clk_rate;
	struct list_head chips;
	struct completion complete;
	struct dma_chan *dmac;
	const struct sunxi_nfc_caps *caps;
};

static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct sunxi_nfc, controller);
}

static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
{
	struct sunxi_nfc *nfc = dev_id;
	u32 st = readl(nfc->regs + NFC_REG_ST);
	u32 ien = readl(nfc->regs + NFC_REG_INT);

	if (!(ien & st))
		return IRQ_NONE;

	if ((ien & st) == ien)
		complete(&nfc->complete);

	writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
	writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);

	return IRQ_HANDLED;
}
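
/*
 * Wait for the given NFC_REG_ST events, either by arming the matching
 * interrupt enables and sleeping on the completion object, or by polling
 * the status register when use_polling is true. A timeout_ms of 0 means
 * NFC_DEFAULT_TIMEOUT_MS.
 */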
static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
				 bool use_polling, unsigned int timeout_ms)
{
	int ret;

	if (events & ~NFC_INT_MASK)
		return -EINVAL;

	if (!timeout_ms)
		timeout_ms = NFC_DEFAULT_TIMEOUT_MS;

	if (!use_polling) {
		init_completion(&nfc->complete);

		writel(events, nfc->regs + NFC_REG_INT);

		ret = wait_for_completion_timeout(&nfc->complete,
						  msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;

		writel(0, nfc->regs + NFC_REG_INT);
	} else {
		u32 status;

		ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
					 (status & events) == events, 1,
					 timeout_ms * 1000);
	}

	writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);

	if (ret)
		dev_err(nfc->dev, "wait interrupt timedout\n");

	return ret;
}

static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
{
	u32 status;
	int ret;

	ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
				 !(status & NFC_CMD_FIFO_STATUS), 1,
				 NFC_DEFAULT_TIMEOUT_MS * 1000);
	if (ret)
		dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");

	return ret;
}

static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
{
	u32 ctl;
	int ret;

	writel(0, nfc->regs + NFC_REG_ECC_CTL);
	writel(NFC_RESET, nfc->regs + NFC_REG_CTL);

	ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
				 !(ctl & NFC_RESET), 1,
				 NFC_DEFAULT_TIMEOUT_MS * 1000);
	if (ret)
		dev_err(nfc->dev, "wait for NAND controller reset timedout\n");

	return ret;
}

static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
				    int chunksize, int nchunks,
				    enum dma_data_direction ddir,
				    struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *dmad;
	enum dma_transfer_direction tdir;
	dma_cookie_t dmat;
	int ret;

	if (ddir == DMA_FROM_DEVICE)
		tdir = DMA_DEV_TO_MEM;
	else
		tdir = DMA_MEM_TO_DEV;

	sg_init_one(sg, buf, nchunks * chunksize);
	ret = dma_map_sg(nfc->dev, sg, 1, ddir);
	if (!ret)
		return -ENOMEM;

	dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
	if (!dmad) {
		ret = -EINVAL;
		goto err_unmap_buf;
	}

	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
	writel(chunksize, nfc->regs + NFC_REG_CNT);
	if (nfc->caps->extra_mbus_conf)
		writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);

	dmat = dmaengine_submit(dmad);

	ret = dma_submit_error(dmat);
	if (ret)
		goto err_clr_dma_flag;

	return 0;

err_clr_dma_flag:
	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);

err_unmap_buf:
	dma_unmap_sg(nfc->dev, sg, 1, ddir);
	return ret;
}

static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc,
				     enum dma_data_direction ddir,
				     struct scatterlist *sg)
{
	dma_unmap_sg(nfc->dev, sg, 1, ddir);
	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
}
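
/*
 * The DMA helpers above are used in pairs: a typical caller (see
 * sunxi_nfc_hw_ecc_read_chunks_dma()) does sunxi_nfc_dma_op_prepare(),
 * dma_async_issue_pending(), writes NFC_REG_CMD to start the transfer,
 * waits with sunxi_nfc_wait_events() (terminating the DMA channel on
 * timeout) and finally calls sunxi_nfc_dma_op_cleanup().
 */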

static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	struct sunxi_nand_chip_sel *sel;
	u32 ctl;

	if (cs > 0 && cs >= sunxi_nand->nsels)
		return;

	ctl = readl(nfc->regs + NFC_REG_CTL) &
	      ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);

	sel = &sunxi_nand->sels[cs];
	ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift);
	if (sel->rb >= 0)
		ctl |= NFC_RB_SEL(sel->rb);

	writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);

	if (nfc->clk_rate != sunxi_nand->clk_rate) {
		clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
		nfc->clk_rate = sunxi_nand->clk_rate;
	}

	writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
	writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
	writel(ctl, nfc->regs + NFC_REG_CTL);
}

static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
{
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		bool poll = false;

		cnt = min(len - offs, NFC_SRAM_SIZE);

		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		/* Arbitrary limit for polling mode */
		if (cnt < 64)
			poll = true;

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
		if (ret)
			break;

		if (buf)
			memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
				      cnt);
		offs += cnt;
	}
}

static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
				int len)
{
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		bool poll = false;

		cnt = min(len - offs, NFC_SRAM_SIZE);

		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
		      NFC_ACCESS_DIR;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		/* Arbitrary limit for polling mode */
		if (cnt < 64)
			poll = true;

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
		if (ret)
			break;

		offs += cnt;
	}
}

/* These seed values have been extracted from Allwinner's BSP */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
	0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
	0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
	0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
	0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
	0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
	0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
	0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
	0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
	0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
	0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
	0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
	0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
	0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
	0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
	0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
	0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
};

/*
 * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
 * have been generated using
 * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
 * the randomizer engine does internally before de/scrambling OOB data.
 *
 * Those tables are statically defined to avoid calculating randomizer state
 * at runtime.
 */
static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
	0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
	0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
	0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
	0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
	0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
	0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
	0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
	0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
	0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
	0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
	0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
	0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
	0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
	0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
	0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
	0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
};

static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
	0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
	0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
	0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
	0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
	0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
	0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
	0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
	0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
	0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
	0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
	0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
	0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
	0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
	0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
	0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
	0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
};

static u16 sunxi_nfc_randomizer_step(u16 state, int count)
{
	state &= 0x7fff;

	/*
	 * This loop is just a simple implementation of a Fibonacci LFSR using
	 * the x16 + x15 + 1 polynomial.
	 */
	while (count--)
		state = ((state >> 1) |
			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;

	return state;
}
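
/*
 * The randomizer XORs each byte with 8 successive LFSR output bits, so
 * stepping the state by 8 (as done below for the second bad block marker
 * byte) yields the scrambling mask of the next byte.
 */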

static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
				      bool ecc)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
	int mod = mtd_div_by_ws(mtd->erasesize, mtd);

	if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
		mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);

	if (ecc) {
		if (mtd->ecc_step_size == 512)
			seeds = sunxi_nfc_randomizer_ecc512_seeds;
		else
			seeds = sunxi_nfc_randomizer_ecc1024_seeds;
	}

	return seeds[page % mod];
}

static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
					bool ecc)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
	u16 state;

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
	state = sunxi_nfc_randomizer_state(nand, page, ecc);
	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
	writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
{
	u16 state = sunxi_nfc_randomizer_state(nand, page, true);

	bbm[0] ^= state;
	bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
}

static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand,
					   const uint8_t *buf, int len,
					   bool ecc, int page)
{
	sunxi_nfc_randomizer_config(nand, page, ecc);
	sunxi_nfc_randomizer_enable(nand);
	sunxi_nfc_write_buf(nand, buf, len);
	sunxi_nfc_randomizer_disable(nand);
}

static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
					  int len, bool ecc, int page)
{
	sunxi_nfc_randomizer_config(nand, page, ecc);
	sunxi_nfc_randomizer_enable(nand);
	sunxi_nfc_read_buf(nand, buf, len);
	sunxi_nfc_randomizer_disable(nand);
}

static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
	u32 ecc_ctl;

	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
	ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
		     NFC_ECC_BLOCK_SIZE_MSK);
	ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
		   NFC_ECC_PIPELINE;

	if (nand->ecc.size == 512)
		ecc_ctl |= NFC_ECC_BLOCK_512;

	writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}

static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
{
	buf[0] = user_data;
	buf[1] = user_data >> 8;
	buf[2] = user_data >> 16;
	buf[3] = user_data >> 24;
}

static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
{
	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}

static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
						int step, bool bbm, int page)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
				   oob);

	/* De-randomize the Bad Block Marker. */
	if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
		sunxi_nfc_randomize_bbm(nand, page, oob);
}

static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
						const u8 *oob, int step,
						bool bbm, int page)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	u8 user_data[4];

	/* Randomize the Bad Block Marker. */
	if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
		memcpy(user_data, oob, sizeof(user_data));
		sunxi_nfc_randomize_bbm(nand, page, user_data);
		oob = user_data;
	}

	writel(sunxi_nfc_buf_to_user_data(oob),
	       nfc->regs + NFC_REG_USER_DATA(step));
}

static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
					  unsigned int *max_bitflips, int ret)
{
	struct mtd_info *mtd = nand_to_mtd(nand);

	if (ret < 0) {
		mtd->ecc_stats.failed++;
	} else {
		mtd->ecc_stats.corrected += ret;
		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
	}
}
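
/*
 * Returns -EBADMSG when the ECC engine flags the chunk as uncorrectable,
 * or the number of corrected bitflips otherwise. When the controller
 * reports a pattern match, the chunk buffers are rewritten with that
 * pattern (0xff or 0x00) and *erased is set for the all-0xff case.
 */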
static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
				    int step, u32 status, bool *erased)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	u32 tmp;

	*erased = false;

	if (status & NFC_ECC_ERR(step))
		return -EBADMSG;

	if (status & NFC_ECC_PAT_FOUND(step)) {
		u8 pattern;

		if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
			pattern = 0x0;
		} else {
			pattern = 0xff;
			*erased = true;
		}

		if (data)
			memset(data, pattern, ecc->size);

		if (oob)
			memset(oob, pattern, ecc->bytes + 4);

		return 0;
	}

	tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));

	return NFC_ECC_ERR_CNT(step, tmp);
}

static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
				       u8 *data, int data_off,
				       u8 *oob, int oob_off,
				       int *cur_off,
				       unsigned int *max_bitflips,
				       bool bbm, bool oob_required, int page)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int raw_mode = 0;
	bool erased;
	int ret;

	if (*cur_off != data_off)
		nand_change_read_column_op(nand, data_off, NULL, 0, false);

	sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);

	if (data_off + ecc->size != oob_off)
		nand_change_read_column_op(nand, oob_off, NULL, 0, false);

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	sunxi_nfc_randomizer_enable(nand);
	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	sunxi_nfc_randomizer_disable(nand);
	if (ret)
		return ret;

	*cur_off = oob_off + ecc->bytes + 4;

	ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
				       readl(nfc->regs + NFC_REG_ECC_ST),
				       &erased);
	if (erased)
		return 1;

	if (ret < 0) {
		/*
		 * Re-read the data with the randomizer disabled to identify
		 * bitflips in erased pages.
		 */
		if (nand->options & NAND_NEED_SCRAMBLING)
			nand_change_read_column_op(nand, data_off, data,
						   ecc->size, false);
		else
			memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
				      ecc->size);

		nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
					   false);

		ret = nand_check_erased_ecc_chunk(data, ecc->size,
						  oob, ecc->bytes + 4,
						  NULL, 0, ecc->strength);
		if (ret >= 0)
			raw_mode = 1;
	} else {
		memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);

		if (oob_required) {
			nand_change_read_column_op(nand, oob_off, NULL, 0,
						   false);
			sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
						      true, page);

			sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
							    bbm, page);
		}
	}

	sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret);

	return raw_mode;
}

static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand,
					    u8 *oob, int *cur_off,
					    bool randomize, int page)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int offset = ((ecc->bytes + 4) * ecc->steps);
	int len = mtd->oobsize - offset;

	if (len <= 0)
		return;

	if (!cur_off || *cur_off != offset)
		nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
					   false);

	if (!randomize)
		sunxi_nfc_read_buf(nand, oob + offset, len);
	else
		sunxi_nfc_randomizer_read_buf(nand, oob + offset, len,
					      false, page);

	if (cur_off)
		*cur_off = mtd->oobsize + mtd->writesize;
}

static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf,
					    int oob_required, int page,
					    int nchunks)
{
	bool randomized = nand->options & NAND_NEED_SCRAMBLING;
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	unsigned int max_bitflips = 0;
	int ret, i, raw_mode = 0;
	struct scatterlist sg;
	u32 status;

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks,
				       DMA_FROM_DEVICE, &sg);
	if (ret)
		return ret;

	sunxi_nfc_hw_ecc_enable(nand);
	sunxi_nfc_randomizer_config(nand, page, false);
	sunxi_nfc_randomizer_enable(nand);

	writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
	       NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);

	dma_async_issue_pending(nfc->dmac);

	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	if (ret)
		dmaengine_terminate_all(nfc->dmac);

	sunxi_nfc_randomizer_disable(nand);
	sunxi_nfc_hw_ecc_disable(nand);

	sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg);

	if (ret)
		return ret;

	status = readl(nfc->regs + NFC_REG_ECC_ST);

	for (i = 0; i < nchunks; i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		u8 *data = buf + data_off;
		u8 *oob = nand->oob_poi + oob_off;
		bool erased;

		ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
					       oob_required ? oob : NULL,
					       i, status, &erased);

		/* ECC errors are handled in the second loop. */
		if (ret < 0)
			continue;

		if (oob_required && !erased) {
			/* TODO: use DMA to retrieve OOB */
			nand_change_read_column_op(nand,
						   mtd->writesize + oob_off,
						   oob, ecc->bytes + 4, false);

			sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
							    !i, page);
		}

		if (erased)
			raw_mode = 1;

		sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
	}

	if (status & NFC_ECC_ERR_MSK) {
		for (i = 0; i < nchunks; i++) {
			int data_off = i * ecc->size;
			int oob_off = i * (ecc->bytes + 4);
			u8 *data = buf + data_off;
			u8 *oob = nand->oob_poi + oob_off;

			if (!(status & NFC_ECC_ERR(i)))
				continue;

			/*
			 * Re-read the data with the randomizer disabled to
			 * identify bitflips in erased pages.
			 * TODO: use DMA to read page in raw mode
			 */
			if (randomized)
				nand_change_read_column_op(nand, data_off,
							   data, ecc->size,
							   false);

			/* TODO: use DMA to retrieve OOB */
			nand_change_read_column_op(nand,
						   mtd->writesize + oob_off,
						   oob, ecc->bytes + 4, false);

			ret = nand_check_erased_ecc_chunk(data, ecc->size,
							  oob, ecc->bytes + 4,
							  NULL, 0,
							  ecc->strength);
			if (ret >= 0)
				raw_mode = 1;

			sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
		}
	}

	if (oob_required)
		sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi,
						NULL, !raw_mode,
						page);

	return max_bitflips;
}
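
/*
 * Program counterpart of sunxi_nfc_hw_ecc_read_chunk(): it sends the
 * (optionally scrambled) data of one ECC step, stores the protected OOB
 * bytes in NFC_REG_USER_DATA() and then triggers an ECC write access so
 * the controller outputs those user data bytes and the ECC for that step.
 */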
static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
					const u8 *data, int data_off,
					const u8 *oob, int oob_off,
					int *cur_off, bool bbm,
					int page)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int ret;

	if (data_off != *cur_off)
		nand_change_write_column_op(nand, data_off, NULL, 0, false);

	sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);

	if (data_off + ecc->size != oob_off)
		nand_change_write_column_op(nand, oob_off, NULL, 0, false);

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	sunxi_nfc_randomizer_enable(nand);
	sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);

	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
	       NFC_ACCESS_DIR | NFC_ECC_OP,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	sunxi_nfc_randomizer_disable(nand);
	if (ret)
		return ret;

	*cur_off = oob_off + ecc->bytes + 4;

	return 0;
}

static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
					     u8 *oob, int *cur_off,
					     int page)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int offset = ((ecc->bytes + 4) * ecc->steps);
	int len = mtd->oobsize - offset;

	if (len <= 0)
		return;

	if (!cur_off || *cur_off != offset)
		nand_change_write_column_op(nand, offset + mtd->writesize,
					    NULL, 0, false);

	sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);

	if (cur_off)
		*cur_off = mtd->oobsize + mtd->writesize;
}

static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
				      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	unsigned int max_bitflips = 0;
	int ret, i, cur_off = 0;
	bool raw_mode = false;

	sunxi_nfc_select_chip(nand, nand->cur_cs);

	nand_read_page_op(nand, page, 0, NULL, 0);

	sunxi_nfc_hw_ecc_enable(nand);

	for (i = 0; i < ecc->steps; i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		u8 *data = buf + data_off;
		u8 *oob = nand->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob,
						  oob_off + mtd->writesize,
						  &cur_off, &max_bitflips,
						  !i, oob_required, page);
		if (ret < 0)
			return ret;
		else if (ret)
			raw_mode = true;
	}

	if (oob_required)
		sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off,
						!raw_mode, page);

	sunxi_nfc_hw_ecc_disable(nand);

	return max_bitflips;
}

static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf,
					  int oob_required, int page)
{
	int ret;

	sunxi_nfc_select_chip(nand, nand->cur_cs);

	nand_read_page_op(nand, page, 0, NULL, 0);

	ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
					       nand->ecc.steps);
	if (ret >= 0)
		return ret;

	/* Fallback to PIO mode */
	return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
}

static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
					 u32 data_offs, u32 readlen,
					 u8 *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int ret, i, cur_off = 0;
	unsigned int max_bitflips = 0;

	sunxi_nfc_select_chip(nand, nand->cur_cs);

	nand_read_page_op(nand, page, 0, NULL, 0);

	sunxi_nfc_hw_ecc_enable(nand);

	for (i = data_offs / ecc->size;
	     i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		u8 *data = bufpoi + data_off;
		u8 *oob = nand->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off,
						  oob,
						  oob_off + mtd->writesize,
						  &cur_off, &max_bitflips, !i,
						  false, page);
		if (ret < 0)
			return ret;
	}

	sunxi_nfc_hw_ecc_disable(nand);

	return max_bitflips;
}

static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand,
					     u32 data_offs, u32 readlen,
					     u8 *buf, int page)
{
	int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size);
	int ret;

	sunxi_nfc_select_chip(nand, nand->cur_cs);

	nand_read_page_op(nand, page, 0, NULL, 0);

	ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
	if (ret >= 0)
		return ret;

	/* Fallback to PIO mode */
	return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen,
					     buf, page);
}

static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int ret, i, cur_off = 0;

	sunxi_nfc_select_chip(nand, nand->cur_cs);

	nand_prog_page_begin_op(nand, page, 0, NULL, 0);

	sunxi_nfc_hw_ecc_enable(nand);

	for (i = 0; i < ecc->steps; i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		const u8 *data = buf + data_off;
		const u8 *oob = nand->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
						   oob_off + mtd->writesize,
						   &cur_off, !i, page);
		if (ret)
			return ret;
	}

	if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
		sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
						 &cur_off, page);

	sunxi_nfc_hw_ecc_disable(nand);

	return nand_prog_page_end_op(nand);
}

static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
					  u32 data_offs, u32 data_len,
					  const u8 *buf, int oob_required,
					  int page)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int ret, i, cur_off = 0;

	sunxi_nfc_select_chip(nand, nand->cur_cs);

	nand_prog_page_begin_op(nand, page, 0, NULL, 0);

	sunxi_nfc_hw_ecc_enable(nand);

	for (i = data_offs / ecc->size;
	     i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		const u8 *data = buf + data_off;
		const u8 *oob = nand->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
						   oob_off + mtd->writesize,
						   &cur_off, !i, page);
		if (ret)
			return ret;
	}

	sunxi_nfc_hw_ecc_disable(nand);

	return nand_prog_page_end_op(nand);
}

static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
					   const u8 *buf,
					   int oob_required,
					   int page)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	struct scatterlist sg;
	int ret, i;

	sunxi_nfc_select_chip(nand, nand->cur_cs);

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps,
				       DMA_TO_DEVICE, &sg);
	if (ret)
		goto pio_fallback;

	for (i = 0; i < ecc->steps; i++) {
		const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));

		sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
	}

	nand_prog_page_begin_op(nand, page, 0, NULL, 0);

	sunxi_nfc_hw_ecc_enable(nand);
	sunxi_nfc_randomizer_config(nand, page, false);
	sunxi_nfc_randomizer_enable(nand);

	writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
	       nfc->regs + NFC_REG_WCMD_SET);

	dma_async_issue_pending(nfc->dmac);

	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
	       NFC_DATA_TRANS | NFC_ACCESS_DIR,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	if (ret)
		dmaengine_terminate_all(nfc->dmac);

	sunxi_nfc_randomizer_disable(nand);
	sunxi_nfc_hw_ecc_disable(nand);

	sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg);

	if (ret)
		return ret;

	if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
		/* TODO: use DMA to transfer extra OOB bytes ? */
		sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
						 NULL, page);

	return nand_prog_page_end_op(nand);

pio_fallback:
	return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
}
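
/*
 * Both OOB accessors reuse the full-page ECC path: reading the OOB goes
 * through ecc->read_page() with oob_required set, and writing it programs
 * a page whose data area is all 0xff so only the user data bytes carry
 * information.
 */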
static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
{
	u8 *buf = nand_get_data_buf(nand);

	return nand->ecc.read_page(nand, buf, 1, page);
}

static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	u8 *buf = nand_get_data_buf(nand);
	int ret;

	memset(buf, 0xff, mtd->writesize);
	ret = nand->ecc.write_page(nand, buf, 1, page);
	if (ret)
		return ret;

	/* Send command to program the OOB data */
	return nand_prog_page_end_op(nand);
}

static const s32 tWB_lut[] = {6, 12, 16, 20};
static const s32 tRHW_lut[] = {4, 8, 12, 20};

static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
				     u32 clk_period)
{
	u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
	int i;

	for (i = 0; i < lut_size; i++) {
		if (clk_cycles <= lut[i])
			return i;
	}

	/* Doesn't fit */
	return -EINVAL;
}

#define sunxi_nand_lookup_timing(l, p, c) \
	_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
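
/*
 * Example (illustrative): a duration of 4 clock periods looked up in
 * tWB_lut {6, 12, 16, 20} returns index 0, i.e. the smallest LUT entry
 * that is not exceeded; a duration of 25 periods does not fit and yields
 * -EINVAL.
 */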

static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline,
				     const struct nand_interface_config *conf)
{
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	const struct nand_sdr_timings *timings;
	u32 min_clk_period = 0;
	s32 tWB, tADL, tWHR, tRHW, tCAD;
	long real_clk_rate;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return -ENOTSUPP;

	/* T1 <=> tCLS */
	if (timings->tCLS_min > min_clk_period)
		min_clk_period = timings->tCLS_min;

	/* T2 <=> tCLH */
	if (timings->tCLH_min > min_clk_period)
		min_clk_period = timings->tCLH_min;

	/* T3 <=> tCS */
	if (timings->tCS_min > min_clk_period)
		min_clk_period = timings->tCS_min;

	/* T4 <=> tCH */
	if (timings->tCH_min > min_clk_period)
		min_clk_period = timings->tCH_min;

	/* T5 <=> tWP */
	if (timings->tWP_min > min_clk_period)
		min_clk_period = timings->tWP_min;

	/* T6 <=> tWH */
	if (timings->tWH_min > min_clk_period)
		min_clk_period = timings->tWH_min;

	/* T7 <=> tALS */
	if (timings->tALS_min > min_clk_period)
		min_clk_period = timings->tALS_min;

	/* T8 <=> tDS */
	if (timings->tDS_min > min_clk_period)
		min_clk_period = timings->tDS_min;

	/* T9 <=> tDH */
	if (timings->tDH_min > min_clk_period)
		min_clk_period = timings->tDH_min;

	/* T10 <=> tRR */
	if (timings->tRR_min > (min_clk_period * 3))
		min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);

	/* T11 <=> tALH */
	if (timings->tALH_min > min_clk_period)
		min_clk_period = timings->tALH_min;

	/* T12 <=> tRP */
	if (timings->tRP_min > min_clk_period)
		min_clk_period = timings->tRP_min;

	/* T13 <=> tREH */
	if (timings->tREH_min > min_clk_period)
		min_clk_period = timings->tREH_min;

	/* T14 <=> tRC */
	if (timings->tRC_min > (min_clk_period * 2))
		min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);

	/* T15 <=> tWC */
	if (timings->tWC_min > (min_clk_period * 2))
		min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);

	/* T16 - T19 + tCAD */
	if (timings->tWB_max > (min_clk_period * 20))
		min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);

	if (timings->tADL_min > (min_clk_period * 32))
		min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);

	if (timings->tWHR_min > (min_clk_period * 32))
		min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);

	if (timings->tRHW_min > (min_clk_period * 20))
		min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);

	/*
	 * In non-EDO, tREA should be less than tRP to guarantee that the
	 * controller does not sample the IO lines too early. Unfortunately,
	 * the sunxi NAND controller does not allow us to have different
	 * values for tRP and tREH (tRP = tREH = tRW / 2).
	 *
	 * We have 2 options to overcome this limitation:
	 *
	 * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint
	 * 2/ Use EDO mode (only works if timings->tRLOH > 0)
	 */
	if (timings->tREA_max > min_clk_period && !timings->tRLOH_min)
		min_clk_period = timings->tREA_max;

	tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
				       min_clk_period);
	if (tWB < 0) {
		dev_err(nfc->dev, "unsupported tWB\n");
		return tWB;
	}

	tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
	if (tADL > 3) {
		dev_err(nfc->dev, "unsupported tADL\n");
		return -EINVAL;
	}

	tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
	if (tWHR > 3) {
		dev_err(nfc->dev, "unsupported tWHR\n");
		return -EINVAL;
	}

	tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
					min_clk_period);
	if (tRHW < 0) {
		dev_err(nfc->dev, "unsupported tRHW\n");
		return tRHW;
	}

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/*
	 * TODO: according to ONFI specs this value only applies for DDR NAND,
	 * but Allwinner seems to set this to 0x7. Mimic them for now.
	 */
	tCAD = 0x7;

	/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
	sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);

	/* Convert min_clk_period from picoseconds to nanoseconds */
	min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);

	/*
	 * Unlike what is stated in Allwinner datasheet, the clk_rate should
	 * be set to (1 / min_clk_period), and not (2 / min_clk_period).
	 * This new formula was verified with a scope and validated by
	 * Allwinner engineers.
	 */
	sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period;
	real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate);
	if (real_clk_rate <= 0) {
		dev_err(nfc->dev, "Unable to round clk %lu\n",
			sunxi_nand->clk_rate);
		return -EINVAL;
	}

	sunxi_nand->timing_ctl = 0;

	/*
	 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
	 * output cycle timings shall be used if the host drives tRC less than
	 * 30 ns. We should also use EDO mode if tREA is bigger than tRP.
	 */
	min_clk_period = NSEC_PER_SEC / real_clk_rate;
	if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max)
		sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO;

	return 0;
}
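
/*
 * Illustrative numbers (not from a datasheet): if the worst-case constraint
 * above yields min_clk_period = 25000 ps, the mod clock is requested at
 * NSEC_PER_SEC / 25 = 40 MHz; with tRC = 2 * 25 ns >= 30 ns, EDO is then
 * only enabled when tREA_max exceeds 25 ns.
 */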

static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &nand->ecc;

	if (section >= ecc->steps)
		return -ERANGE;

	oobregion->offset = section * (ecc->bytes + 4) + 4;
	oobregion->length = ecc->bytes;

	return 0;
}

static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &nand->ecc;

	if (section > ecc->steps)
		return -ERANGE;

	/*
	 * The first 2 bytes are used for BB markers, hence we
	 * only have 2 bytes available in the first user data
	 * section.
	 */
	if (!section && ecc->mode == NAND_ECC_HW) {
		oobregion->offset = 2;
		oobregion->length = 2;

		return 0;
	}

	oobregion->offset = section * (ecc->bytes + 4);

	if (section < ecc->steps)
		oobregion->length = 4;
	else
		oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
	.ecc = sunxi_nand_ooblayout_ecc,
	.free = sunxi_nand_ooblayout_free,
};
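
/*
 * Illustrative layout for a 4 KiB page with 256 OOB bytes, 1024-byte steps
 * and 32-bit strength (ecc->bytes = 56): ECC section 0 covers OOB bytes
 * 4-59, free section 0 is bytes 2-3 (the BBM uses bytes 0-1), each other
 * free section is the 4 user data bytes right before the next ECC area,
 * and the last free section spans bytes 240-255.
 */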

static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
{
	kfree(ecc->priv);
}

static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
				       struct nand_ecc_ctrl *ecc,
				       struct device_node *np)
{
	static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct sunxi_nand_hw_ecc *data;
	int nsectors;
	int ret;
	int i;

	if (ecc->options & NAND_ECC_MAXIMIZE) {
		int bytes;

		ecc->size = 1024;
		nsectors = mtd->writesize / ecc->size;

		/* Reserve 2 bytes for the BBM */
		bytes = (mtd->oobsize - 2) / nsectors;

		/* 4 non-ECC bytes are added before each ECC bytes section */
		bytes -= 4;

		/* and bytes has to be even. */
		if (bytes % 2)
			bytes--;

		ecc->strength = bytes * 8 / fls(8 * ecc->size);

		for (i = 0; i < ARRAY_SIZE(strengths); i++) {
			if (strengths[i] > ecc->strength)
				break;
		}

		if (!i)
			ecc->strength = 0;
		else
			ecc->strength = strengths[i - 1];
	}

	if (ecc->size != 512 && ecc->size != 1024)
		return -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Prefer 1k ECC chunk over 512 ones */
	if (ecc->size == 512 && mtd->writesize > 512) {
		ecc->size = 1024;
		ecc->strength *= 2;
	}

	/* Add ECC info retrieval from DT */
	for (i = 0; i < ARRAY_SIZE(strengths); i++) {
		if (ecc->strength <= strengths[i]) {
			/*
			 * Update ecc->strength value with the actual strength
			 * that will be used by the ECC engine.
			 */
			ecc->strength = strengths[i];
			break;
		}
	}

	if (i >= ARRAY_SIZE(strengths)) {
		dev_err(nfc->dev, "unsupported strength\n");
		ret = -ENOTSUPP;
		goto err;
	}

	data->mode = i;

	/* HW ECC always request ECC bytes for 1024 bytes blocks */
	ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);

	/* HW ECC always work with even numbers of ECC bytes */
	ecc->bytes = ALIGN(ecc->bytes, 2);

	nsectors = mtd->writesize / ecc->size;

	if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
		ret = -EINVAL;
		goto err;
	}

	ecc->read_oob = sunxi_nfc_hw_ecc_read_oob;
	ecc->write_oob = sunxi_nfc_hw_ecc_write_oob;
	mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
	ecc->priv = data;

	if (nfc->dmac) {
		ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
		ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
		nand->options |= NAND_USES_DMA;
	} else {
		ecc->read_page = sunxi_nfc_hw_ecc_read_page;
		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
		ecc->write_page = sunxi_nfc_hw_ecc_write_page;
	}

	/* TODO: support DMA for raw accesses and subpage write */
	ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
	ecc->read_oob_raw = nand_read_oob_std;
	ecc->write_oob_raw = nand_write_oob_std;

	return 0;

err:
	kfree(data);

	return ret;
}

static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
{
	switch (ecc->mode) {
	case NAND_ECC_HW:
		sunxi_nand_hw_ecc_ctrl_cleanup(ecc);
		break;
	case NAND_ECC_NONE:
	default:
		break;
	}
}

static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	struct device_node *np = nand_get_flash_node(nand);
	int ret;

	if (nand->bbt_options & NAND_BBT_USE_FLASH)
		nand->bbt_options |= NAND_BBT_NO_OOB;

	if (nand->options & NAND_NEED_SCRAMBLING)
		nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->options |= NAND_SUBPAGE_READ;

	if (!ecc->size) {
		ecc->size = nand->base.eccreq.step_size;
		ecc->strength = nand->base.eccreq.strength;
	}

	if (!ecc->size || !ecc->strength)
		return -EINVAL;

	switch (ecc->mode) {
	case NAND_ECC_HW:
		ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
		if (ret)
			return ret;
		break;
	case NAND_ECC_NONE:
	case NAND_ECC_SOFT:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
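
/*
 * Translate one parsed sub-operation into a single NFC command cycle:
 * up to two command opcodes, up to 8 address cycles packed into
 * NFC_REG_ADDR_LOW/HIGH, an optional data transfer through the internal
 * SRAM (capped at NFC_SRAM_SIZE) and an optional wait on R/B.
 */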
static int sunxi_nfc_exec_subop(struct nand_chip *nand,
				const struct nand_subop *subop)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { };
	unsigned int i, j, remaining, start;
	void *inbuf = NULL;
	int ret;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (cmd & NFC_SEND_CMD1) {
				if (WARN_ON(cmd & NFC_SEND_CMD2))
					return -EINVAL;

				cmd |= NFC_SEND_CMD2;
				extcmd |= instr->ctx.cmd.opcode;
			} else {
				cmd |= NFC_SEND_CMD1 |
				       NFC_CMD(instr->ctx.cmd.opcode);
			}
			break;

		case NAND_OP_ADDR_INSTR:
			remaining = nand_subop_get_num_addr_cyc(subop, i);
			start = nand_subop_get_addr_start_off(subop, i);
			for (j = 0; j < 8 && j + start < remaining; j++) {
				u32 addr = instr->ctx.addr.addrs[j + start];

				addrs[j / 4] |= addr << (j % 4) * 8;
			}

			if (j)
				cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j);

			break;

		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			start = nand_subop_get_data_start_off(subop, i);
			remaining = nand_subop_get_data_len(subop, i);
			cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
			cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;

			if (instr->type == NAND_OP_DATA_OUT_INSTR) {
				cmd |= NFC_ACCESS_DIR;
				memcpy_toio(nfc->regs + NFC_RAM0_BASE,
					    instr->ctx.data.buf.out + start,
					    cnt);
			} else {
				inbuf = instr->ctx.data.buf.in + start;
			}

			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= NFC_WAIT_FLAG;
			break;
		}
	}

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	if (cmd & NFC_SEND_ADR) {
		writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW);
		writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH);
	}

	if (cmd & NFC_SEND_CMD2)
		writel(extcmd,
		       nfc->regs +
		       (cmd & NFC_ACCESS_DIR ?
			NFC_REG_WCMD_SET : NFC_REG_RCMD_SET));

	if (cmd & NFC_DATA_TRANS)
		writel(cnt, nfc->regs + NFC_REG_CNT);

	writel(cmd, nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG,
				    !(cmd & NFC_WAIT_FLAG) && cnt < 64,
				    0);
	if (ret)
		return ret;

	if (inbuf)
		memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt);

	return 0;
}

static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand,
				  const struct nand_subop *subop)
{
	return nand_soft_waitrdy(nand,
				 subop->instrs[0].ctx.waitrdy.timeout_ms);
}

static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
);

static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true)),
	NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy,
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
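
/*
 * The parser is picked according to whether the selected CS has a R/B line
 * wired to the controller: without one, WAITRDY cannot rely on
 * NFC_WAIT_FLAG and is handled by sunxi_nfc_soft_waitrdy() instead.
 */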
static int sunxi_nfc_exec_op(struct nand_chip *nand,
			     const struct nand_operation *op, bool check_only)
{
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	const struct nand_op_parser *parser;

	if (!check_only)
		sunxi_nfc_select_chip(nand, op->cs);

	if (sunxi_nand->sels[op->cs].rb >= 0)
		parser = &sunxi_nfc_op_parser;
	else
		parser = &sunxi_nfc_norb_op_parser;

	return nand_op_parser_exec_op(nand, parser, op, check_only);
}

static const struct nand_controller_ops sunxi_nand_controller_ops = {
	.attach_chip = sunxi_nand_attach_chip,
	.setup_interface = sunxi_nfc_setup_interface,
	.exec_op = sunxi_nfc_exec_op,
};

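/*
 * Instantiate the NAND chip described by the @np child node: parse its
 * "reg" (chip-select) and "allwinner,rb" properties, attach the chip to the
 * controller and register the resulting MTD device.
 */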
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
				struct device_node *np)
{
	struct sunxi_nand_chip *sunxi_nand;
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int nsels;
	int ret;
	int i;
	u32 tmp;

	if (!of_get_property(np, "reg", &nsels))
		return -EINVAL;

	nsels /= sizeof(u32);
	if (!nsels) {
		dev_err(dev, "invalid reg property size\n");
		return -EINVAL;
	}

	sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
				  GFP_KERNEL);
	if (!sunxi_nand) {
		dev_err(dev, "could not allocate chip\n");
		return -ENOMEM;
	}

	sunxi_nand->nsels = nsels;

	for (i = 0; i < nsels; i++) {
		ret = of_property_read_u32_index(np, "reg", i, &tmp);
		if (ret) {
			dev_err(dev, "could not retrieve reg property: %d\n",
				ret);
			return ret;
		}

		if (tmp > NFC_MAX_CS) {
			dev_err(dev,
				"invalid reg value: %u (max CS = 7)\n",
				tmp);
			return -EINVAL;
		}

		if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
			dev_err(dev, "CS %u already assigned\n", tmp);
			return -EINVAL;
		}

		sunxi_nand->sels[i].cs = tmp;

		if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
		    tmp < 2)
			sunxi_nand->sels[i].rb = tmp;
		else
			sunxi_nand->sels[i].rb = -1;
	}

	nand = &sunxi_nand->nand;
	nand->controller = &nfc->controller;
	nand->controller->ops = &sunxi_nand_controller_ops;

	/*
	 * Set the ECC mode to the default value in case nothing is specified
	 * in the DT.
	 */
	nand->ecc.mode = NAND_ECC_HW;
	nand_set_flash_node(nand, np);

	mtd = nand_to_mtd(nand);
	mtd->dev.parent = dev;

	ret = nand_scan(nand, nsels);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "failed to register mtd device: %d\n", ret);
		nand_cleanup(nand);
		return ret;
	}

	list_add_tail(&sunxi_nand->node, &nfc->chips);

	return 0;
}

static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
	struct device_node *np = dev->of_node;
	struct device_node *nand_np;
	int nchips = of_get_child_count(np);
	int ret;

	if (nchips > 8) {
		dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = sunxi_nand_chip_init(dev, nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			return ret;
		}
	}

	return 0;
}

static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
	struct sunxi_nand_chip *sunxi_nand;
	struct nand_chip *chip;
	int ret;

	while (!list_empty(&nfc->chips)) {
		sunxi_nand = list_first_entry(&nfc->chips,
					      struct sunxi_nand_chip,
					      node);
		chip = &sunxi_nand->nand;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		sunxi_nand_ecc_cleanup(&chip->ecc);
		list_del(&sunxi_nand->node);
	}
}

static int sunxi_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	struct sunxi_nfc *nfc;
	int irq;
	int ret;

	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = dev;
	nand_controller_init(&nfc->controller);
	INIT_LIST_HEAD(&nfc->chips);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, r);
	if (IS_ERR(nfc->regs))
		return PTR_ERR(nfc->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	nfc->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(nfc->ahb_clk)) {
		dev_err(dev, "failed to retrieve ahb clk\n");
		return PTR_ERR(nfc->ahb_clk);
	}

	ret = clk_prepare_enable(nfc->ahb_clk);
	if (ret)
		return ret;

	nfc->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(nfc->mod_clk)) {
		dev_err(dev, "failed to retrieve mod clk\n");
		ret = PTR_ERR(nfc->mod_clk);
		goto out_ahb_clk_unprepare;
	}

	ret = clk_prepare_enable(nfc->mod_clk);
	if (ret)
		goto out_ahb_clk_unprepare;

	nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
	if (IS_ERR(nfc->reset)) {
		ret = PTR_ERR(nfc->reset);
		goto out_mod_clk_unprepare;
	}

	ret = reset_control_deassert(nfc->reset);
	if (ret) {
		dev_err(dev, "reset err %d\n", ret);
		goto out_mod_clk_unprepare;
	}

	nfc->caps = of_device_get_match_data(&pdev->dev);
	if (!nfc->caps) {
		ret = -EINVAL;
		goto out_ahb_reset_reassert;
	}

	ret = sunxi_nfc_rst(nfc);
	if (ret)
		goto out_ahb_reset_reassert;

	writel(0, nfc->regs + NFC_REG_INT);
	ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
			       0, "sunxi-nand", nfc);
	if (ret)
		goto out_ahb_reset_reassert;

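	/*
	 * The "rxtx" DMA channel is optional: without it the controller is
	 * driven in PIO mode through its internal SRAM (see
	 * sunxi_nand_hw_ecc_ctrl_init() for the page accessor selection).
	 */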
	nfc->dmac = dma_request_chan(dev, "rxtx");
	if (IS_ERR(nfc->dmac)) {
		ret = PTR_ERR(nfc->dmac);
		if (ret == -EPROBE_DEFER)
			goto out_ahb_reset_reassert;

		/* Ignore errors to fall back to PIO mode */
		dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
		nfc->dmac = NULL;
	} else {
		struct dma_slave_config dmac_cfg = { };

		dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
		dmac_cfg.dst_addr = dmac_cfg.src_addr;
		dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
		dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
		dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
		dmaengine_slave_config(nfc->dmac, &dmac_cfg);

		if (nfc->caps->extra_mbus_conf)
			writel(readl(nfc->regs + NFC_REG_CTL) |
			       NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
	}

	platform_set_drvdata(pdev, nfc);

	ret = sunxi_nand_chips_init(dev, nfc);
	if (ret) {
		dev_err(dev, "failed to init nand chips\n");
		goto out_release_dmac;
	}

	return 0;

out_release_dmac:
	if (nfc->dmac)
		dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
	reset_control_assert(nfc->reset);
out_mod_clk_unprepare:
	clk_disable_unprepare(nfc->mod_clk);
out_ahb_clk_unprepare:
	clk_disable_unprepare(nfc->ahb_clk);

	return ret;
}

static int sunxi_nfc_remove(struct platform_device *pdev)
{
	struct sunxi_nfc *nfc = platform_get_drvdata(pdev);

	sunxi_nand_chips_cleanup(nfc);

	reset_control_assert(nfc->reset);

	if (nfc->dmac)
		dma_release_channel(nfc->dmac);
	clk_disable_unprepare(nfc->mod_clk);
	clk_disable_unprepare(nfc->ahb_clk);

	return 0;
}

static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
	.reg_io_data = NFC_REG_A10_IO_DATA,
	.dma_maxburst = 4,
};

static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
	.extra_mbus_conf = true,
	.reg_io_data = NFC_REG_A23_IO_DATA,
	.dma_maxburst = 8,
};

static const struct of_device_id sunxi_nfc_ids[] = {
	{
		.compatible = "allwinner,sun4i-a10-nand",
		.data = &sunxi_nfc_a10_caps,
	},
	{
		.compatible = "allwinner,sun8i-a23-nand-controller",
		.data = &sunxi_nfc_a23_caps,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);

static struct platform_driver sunxi_nfc_driver = {
	.driver = {
		.name = "sunxi_nand",
		.of_match_table = sunxi_nfc_ids,
	},
	.probe = sunxi_nfc_probe,
	.remove = sunxi_nfc_remove,
};
module_platform_driver(sunxi_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");