// SPDX-License-Identifier: GPL-2.0
//
// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
//
// Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
//
// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
//
// Copyright (C) 2020 MediaTek Inc.
// Author: Weijie Gao <weijie.gao@mediatek.com>
//
// This controller organizes the page data as several interleaved sectors
// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
// +---------+------+------+---------+------+------+-----+
// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
// +---------+------+------+---------+------+------+-----+
// With auto-format turned on, DMA only returns this part:
// +---------+---------+-----+
// | Sector1 | Sector2 | ... |
// +---------+---------+-----+
// The FDM data will be filled into the FDM registers, and ECC parity data
// isn't accessible.
// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
// in its original order shown in the first table. ECC can't be turned on when
// auto-format is off.
//
// However, the Linux SPI-NAND driver expects the data returned as:
// +------+-----+
// | Page | OOB |
// +------+-----+
// where the page data is continuously stored instead of interleaved.
// So we assume all instructions matching the page_op template between ECC
// prepare_io_req and finish_io_req are for page cache r/w.
// Here's how this spi-mem driver operates when reading:
// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
// 2. Perform page ops and let the controller fill the DMA bounce buffer with
//    de-interleaved sector data and set FDM registers.
// 3. Return the data as:
//    +---------+---------+-----+------+------+-----+
//    | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
//    +---------+---------+-----+------+------+-----+
// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
//    read the data with auto-format off into the bounce buffer and copy
//    needed data to the buffer specified in the request.
//
// Write requests operate in a similar manner.
// As a limitation of this strategy, we won't be able to access any ECC parity
// data at all in Linux.
//
// Here's the bad block mark situation on MTK chips:
// In older chips like mt7622, MTK uses the first FDM byte in the first sector
// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
// in the returned data, which is the BBM position expected by the kernel.
// However, the conventional bad block mark is the first byte of the OOB, which
// is part of the last sector data in the interleaved layout. Instead of fixing
// their hardware, MTK decided to address this inconsistency in software on
// later chips. On these later chips, the BootROM expects the following:
// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
//    (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
// 2. The original byte stored at that position in the DMA buffer will be
//    stored as the first byte of the FDM section in the last sector.
// We can't disagree with the BootROM, so after de-interleaving, we need to
// perform the following swaps in read:
// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
//    which is the BBM position expected by the kernel.
// 2. Store the page data byte at [pagesize + (nsectors - 1) * fdm] back to
//    [page_size - (nsectors - 1) * spare_size].
// Similarly, when writing, we need to perform swaps in the other direction.
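//
// As a purely illustrative example (hypothetical geometry, not tied to any
// particular chip): with a 2048-byte page handled as 4 sectors of 512 bytes,
// spare_size = 16 and fdm_size = 8, the read swaps above move the byte at
// offset 2048 - 3 * 16 = 2000 of the de-interleaved buffer (what the BootROM
// treats as the BBM) to offset 2048 (the kernel's expected BBM position), and
// put the byte found at offset 2048 + 3 * 8 = 2072 (first FDM byte of the
// last sector) back at offset 2000.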

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/of_platform.h>
#include <linux/mtd/nand-ecc-mtk.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/mtd/nand.h>

// NFI registers
#define NFI_CNFG		0x000
#define CNFG_OP_MODE_S		12
#define CNFG_OP_MODE_CUST	6
#define CNFG_OP_MODE_PROGRAM	3
#define CNFG_AUTO_FMT_EN	BIT(9)
#define CNFG_HW_ECC_EN		BIT(8)
#define CNFG_DMA_BURST_EN	BIT(2)
#define CNFG_READ_MODE		BIT(1)
#define CNFG_DMA_MODE		BIT(0)

#define NFI_PAGEFMT		0x0004
#define NFI_SPARE_SIZE_LS_S	16
#define NFI_FDM_ECC_NUM_S	12
#define NFI_FDM_NUM_S		8
#define NFI_SPARE_SIZE_S	4
#define NFI_SEC_SEL_512		BIT(2)
#define NFI_PAGE_SIZE_S		0
#define NFI_PAGE_SIZE_512_2K	0
#define NFI_PAGE_SIZE_2K_4K	1
#define NFI_PAGE_SIZE_4K_8K	2
#define NFI_PAGE_SIZE_8K_16K	3

#define NFI_CON			0x008
#define CON_SEC_NUM_S		12
#define CON_BWR			BIT(9)
#define CON_BRD			BIT(8)
#define CON_NFI_RST		BIT(1)
#define CON_FIFO_FLUSH		BIT(0)

#define NFI_INTR_EN		0x010
#define NFI_INTR_STA		0x014
#define NFI_IRQ_INTR_EN		BIT(31)
#define NFI_IRQ_CUS_READ	BIT(8)
#define NFI_IRQ_CUS_PG		BIT(7)

#define NFI_CMD			0x020
#define NFI_CMD_DUMMY_READ	0x00
#define NFI_CMD_DUMMY_WRITE	0x80

#define NFI_STRDATA		0x040
#define STR_DATA		BIT(0)

#define NFI_STA			0x060
#define NFI_NAND_FSM_7622	GENMASK(28, 24)
#define NFI_NAND_FSM_7986	GENMASK(29, 23)
#define NFI_FSM			GENMASK(19, 16)
#define READ_EMPTY		BIT(12)

#define NFI_FIFOSTA		0x064
#define FIFO_WR_REMAIN_S	8
#define FIFO_RD_REMAIN_S	0

#define NFI_ADDRCNTR		0x070
#define SEC_CNTR		GENMASK(16, 12)
#define SEC_CNTR_S		12
#define NFI_SEC_CNTR(val)	(((val)&SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR		0x080

#define NFI_BYTELEN		0x084
#define BUS_SEC_CNTR(val)	(((val)&SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L		0x0a0
#define NFI_FDM0M		0x0a4
#define NFI_FDML(n)		(NFI_FDM0L + (n)*8)
#define NFI_FDMM(n)		(NFI_FDM0M + (n)*8)

#define NFI_DEBUG_CON1		0x220
#define WBUF_EN			BIT(2)

#define NFI_MASTERSTA		0x224
#define MAS_ADDR		GENMASK(11, 9)
#define MAS_RD			GENMASK(8, 6)
#define MAS_WR			GENMASK(5, 3)
#define MAS_RDDLY		GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622	(MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define NFI_MASTERSTA_MASK_7986	3

// SNFI registers
#define SNF_MAC_CTL		0x500
#define MAC_XIO_SEL		BIT(4)
#define SF_MAC_EN		BIT(3)
#define SF_TRIG			BIT(2)
#define WIP_READY		BIT(1)
#define WIP			BIT(0)

#define SNF_MAC_OUTL		0x504
#define SNF_MAC_INL		0x508

#define SNF_RD_CTL2		0x510
#define DATA_READ_DUMMY_S	8
#define DATA_READ_MAX_DUMMY	0xf
#define DATA_READ_CMD_S		0

#define SNF_RD_CTL3		0x514

#define SNF_PG_CTL1		0x524
#define PG_LOAD_CMD_S		8

#define SNF_PG_CTL2		0x528

#define SNF_MISC_CTL		0x538
#define SW_RST			BIT(28)
#define FIFO_RD_LTC_S		25
#define PG_LOAD_X4_EN		BIT(20)
#define DATA_READ_MODE_S	16
#define DATA_READ_MODE		GENMASK(18, 16)
#define DATA_READ_MODE_X1	0
#define DATA_READ_MODE_X2	1
#define DATA_READ_MODE_X4	2
#define DATA_READ_MODE_DUAL	5
#define DATA_READ_MODE_QUAD	6
#define DATA_READ_LATCH_LAT	GENMASK(9, 8)
#define DATA_READ_LATCH_LAT_S	8
#define PG_LOAD_CUSTOM_EN	BIT(7)
#define DATARD_CUSTOM_EN	BIT(6)
#define CS_DESELECT_CYC_S	0

#define SNF_MISC_CTL2		0x53c
#define PROGRAM_LOAD_BYTE_NUM_S	16
#define READ_DATA_BYTE_NUM_S	11

#define SNF_DLY_CTL3		0x548
#define SFCK_SAM_DLY_S		0
#define SFCK_SAM_DLY		GENMASK(5, 0)
#define SFCK_SAM_DLY_TOTAL	9
#define SFCK_SAM_DLY_RANGE	47

#define SNF_STA_CTL1		0x550
#define CUS_PG_DONE		BIT(28)
#define CUS_READ_DONE		BIT(27)
#define SPI_STATE_S		0
#define SPI_STATE		GENMASK(3, 0)

#define SNF_CFG			0x55c
#define SPI_MODE		BIT(0)

#define SNF_GPRAM		0x800
#define SNF_GPRAM_SIZE		0xa0

#define SNFI_POLL_INTERVAL	1000000

static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const u8 mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
	74
};

struct mtk_snand_caps {
	u16 sector_size;
	u16 max_sectors;
	u16 fdm_size;
	u16 fdm_ecc_size;
	u16 fifo_size;

	bool bbm_swap;
	bool empty_page_check;
	u32 mastersta_mask;
	u32 nandfsm_mask;

	const u8 *spare_sizes;
	u32 num_spare_size;
};

static const struct mtk_snand_caps mt7622_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = false,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.nandfsm_mask = NFI_NAND_FSM_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

static const struct mtk_snand_caps mt7629_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = true,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.nandfsm_mask = NFI_NAND_FSM_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

static const struct mtk_snand_caps mt7986_snand_caps = {
	.sector_size = 1024,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 64,
	.bbm_swap = true,
	.empty_page_check = true,
	.mastersta_mask = NFI_MASTERSTA_MASK_7986,
	.nandfsm_mask = NFI_NAND_FSM_7986,
	.spare_sizes = mt7986_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
};

struct mtk_snand_conf {
	size_t page_size;
	size_t oob_size;
	u8 nsectors;
	u8 spare_size;
};

struct mtk_snand {
	struct spi_controller *ctlr;
	struct device *dev;
	struct clk *nfi_clk;
	struct clk *pad_clk;
	struct clk *nfi_hclk;
	void __iomem *nfi_base;
	int irq;
	struct completion op_done;
	const struct mtk_snand_caps *caps;
	struct mtk_ecc_config *ecc_cfg;
	struct mtk_ecc *ecc;
	struct mtk_snand_conf nfi_cfg;
	struct mtk_ecc_stats ecc_stats;
	struct nand_ecc_engine ecc_eng;
	bool autofmt;
	u8 *buf;
	size_t buf_len;
};

static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;

	return container_of(eng, struct mtk_snand, ecc_eng);
}

static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
{
	if (snf->buf_len >= size)
		return 0;
	kfree(snf->buf);
	snf->buf = kmalloc(size, GFP_KERNEL);
	if (!snf->buf)
		return -ENOMEM;
	snf->buf_len = size;
	memset(snf->buf, 0xff, snf->buf_len);
	return 0;
}

static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
{
	u32 val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
{
	u32 i, val = 0, es = sizeof(u32);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (u8)(val >> (8 * (i % es)));
	}
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	u32 val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI master is still busy after reset\n");
		return ret;
	}

	ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
				 !(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				 !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	u32 val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				 !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL,
		    (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
{
	int ret;
	u32 val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				 val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
				 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Timed out waiting for WIP cleared\n");

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
{
	u32 rx_len = 0;
	u32 reg_offs = 0;
	u32 val = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int i, ret;
	u8 b;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		rx_len = op->data.nbytes;
		rx_buf = op->data.buf.in;
	} else {
		tx_buf = op->data.buf.out;
	}

	mtk_snand_mac_reset(snf);

	for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
		b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
		b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
			val |= tx_buf[i] << (8 * (reg_offs % 4));
			if (reg_offs % 4 == 3) {
				nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
				val = 0;
			}
		}
	}

	if (reg_offs % 4)
		nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);

	for (i = 0; i < reg_offs; i += 4)
		dev_dbg(snf->dev, "%d: %08X", i,
			nfi_read32(snf, SNF_GPRAM + i));

	dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);

	ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
	if (ret)
		return ret;

	if (!rx_len)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
	return 0;
}

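// Illustrative example of the spare-size selection below (hypothetical
// numbers, not a statement about any specific board): with the default
// 2048 + 64 page format set in probe, an mt7986-class controller with
// 1024-byte sectors gets nsectors = 2 and spare_size = 64 / 2 = 32; that value
// is halved to 16 for the table lookup because the hardware doubles it again
// for 1KB sectors, the largest table entry not exceeding 16 is 16 (index 0),
// and the effective spare_size stored in nfi_cfg becomes 32.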
static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
				   u32 oob_size)
{
	int spare_idx = -1;
	u32 spare_size, spare_size_shift, pagesize_idx;
	u32 sector_size_512;
	u8 nsectors;
	int i;

	// skip if it's already configured as required.
	if (snf->nfi_cfg.page_size == page_size &&
	    snf->nfi_cfg.oob_size == oob_size)
		return 0;

	nsectors = page_size / snf->caps->sector_size;
	if (nsectors > snf->caps->max_sectors) {
		dev_err(snf->dev, "too many sectors required.\n");
		goto err;
	}

	if (snf->caps->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (page_size) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		dev_err(snf->dev, "unsupported page size.\n");
		goto err;
	}

	spare_size = oob_size / nsectors;
	// If we're using the 1KB sector size, HW will automatically double the
	// spare size. We should only use half of the value in this case.
	if (snf->caps->sector_size == 1024)
		spare_size /= 2;

	for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
		if (snf->caps->spare_sizes[i] <= spare_size) {
			spare_size = snf->caps->spare_sizes[i];
			if (snf->caps->sector_size == 1024)
				spare_size *= 2;
			spare_idx = i;
			break;
		}
	}

	if (spare_idx < 0) {
		dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
		goto err;
	}

	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
			    (snf->caps->fdm_size << NFI_FDM_NUM_S) |
			    (spare_idx << spare_size_shift) |
			    (pagesize_idx << NFI_PAGE_SIZE_S) | sector_size_512);

	snf->nfi_cfg.page_size = page_size;
	snf->nfi_cfg.oob_size = oob_size;
	snf->nfi_cfg.nsectors = nsectors;
	snf->nfi_cfg.spare_size = spare_size;

	dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
		snf->caps->sector_size, spare_size, nsectors);
	return snand_prepare_bouncebuf(snf, page_size + oob_size);
err:
	dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
		oob_size);
	return -EOPNOTSUPP;
}

static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobecc)
{
	// ECC area is not accessible
	return -ERANGE;
}

static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobfree)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mtk_snand *ms = nand_to_mtk_snand(nand);

	if (section >= ms->nfi_cfg.nsectors)
		return -ERANGE;

	oobfree->length = ms->caps->fdm_size - 1;
	oobfree->offset = section * ms->caps->fdm_size + 1;
	return 0;
}

static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
	.ecc = mtk_snand_ooblayout_ecc,
	.free = mtk_snand_ooblayout_free,
};

static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
	struct nand_ecc_props *reqs = &nand->ecc.requirements;
	struct nand_ecc_props *user = &nand->ecc.user_conf;
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int step_size = 0, strength = 0, desired_correction = 0, steps;
	bool ecc_user = false;
	int ret;
	u32 parity_bits, max_ecc_bytes;
	struct mtk_ecc_config *ecc_cfg;

	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
				      nand->memorg.oobsize);
	if (ret)
		return ret;

	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
	if (!ecc_cfg)
		return -ENOMEM;

	nand->ecc.ctx.priv = ecc_cfg;

	if (user->step_size && user->strength) {
		step_size = user->step_size;
		strength = user->strength;
		ecc_user = true;
	} else if (reqs->step_size && reqs->strength) {
		step_size = reqs->step_size;
		strength = reqs->strength;
	}

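	// Convert the requested (strength per step_size) into a per-sector
	// strength. As a hypothetical example: a chip asking for 4 bits per
	// 512-byte step on a 2048-byte page handled as 2 sectors of 1024
	// bytes needs 4 * 4 = 16 corrections per page, i.e. 8 bits per sector.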
	if (step_size && strength) {
		steps = mtd->writesize / step_size;
		desired_correction = steps * strength;
		strength = desired_correction / snf->nfi_cfg.nsectors;
	}

	ecc_cfg->mode = ECC_NFI_MODE;
	ecc_cfg->sectors = snf->nfi_cfg.nsectors;
	ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;

	// calculate the max possible strength under current page format
	parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
	max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
	ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
	mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);

	// if there's a user requested strength, find the minimum strength that
	// meets the requirement. Otherwise use the maximum strength which is
	// expected by BootROM.
	if (ecc_user && strength) {
		u32 s_next = ecc_cfg->strength - 1;

		while (1) {
			mtk_ecc_adjust_strength(snf->ecc, &s_next);
			if (s_next >= ecc_cfg->strength)
				break;
			if (s_next < strength)
				break;
			ecc_cfg->strength = s_next;
			s_next = ecc_cfg->strength - 1;
		}
	}

	mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);

	conf->step_size = snf->caps->sector_size;
	conf->strength = ecc_cfg->strength;

	if (ecc_cfg->strength < strength)
		dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
			 strength);
	dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
		 ecc_cfg->strength, snf->caps->sector_size);

	return 0;
}

static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
{
	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);

	kfree(ecc_cfg);
}

static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
					struct nand_page_io_req *req)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
	int ret;

	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
				      nand->memorg.oobsize);
	if (ret)
		return ret;
	snf->autofmt = true;
	snf->ecc_cfg = ecc_cfg;
	return 0;
}

static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
				       struct nand_page_io_req *req)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	snf->ecc_cfg = NULL;
	snf->autofmt = false;
	if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
		return 0;

	if (snf->ecc_stats.failed)
		mtd->ecc_stats.failed += snf->ecc_stats.failed;
	mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
	return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
}

static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
	.init_ctx = mtk_snand_ecc_init_ctx,
	.cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
	.prepare_io_req = mtk_snand_ecc_prepare_io_req,
	.finish_io_req = mtk_snand_ecc_finish_io_req,
};

static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
{
	u32 vall, valm;
	u8 *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->caps->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->caps->fdm_size;
	}
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
{
	u32 fdm_size = snf->caps->fdm_size;
	const u8 *oobptr = buf;
	u32 vall, valm;
	int i, j;

	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
{
	u32 buf_bbm_pos, fdm_bbm_pos;

	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
		return;

	// swap [pagesize] byte on nand with the first fdm byte
	// in the last sector.
	buf_bbm_pos = snf->nfi_cfg.page_size -
		      (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
	fdm_bbm_pos = snf->nfi_cfg.page_size +
		      (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;

	swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	u32 fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
		return;

	// swap the first fdm byte in the first and the last sector.
	fdm_bbm_pos1 = snf->nfi_cfg.page_size;
	fdm_bbm_pos2 = snf->nfi_cfg.page_size +
		       (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
	swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
}

static int mtk_snand_read_page_cache(struct mtk_snand *snf,
				     const struct spi_mem_op *op)
{
	u8 *buf = snf->buf;
	u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
	// the address part to be sent by the controller
	u32 op_addr = op->addr.val;
	// where to start copying data from bounce buffer
	u32 rd_offset = 0;
	u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
	u32 op_mode = 0;
	u32 dma_len = snf->buf_len;
	int ret = 0;
	u32 rd_mode, rd_bytes, val;
	dma_addr_t buf_dma;

	if (snf->autofmt) {
		u32 last_bit;
		u32 mask;

		dma_len = snf->nfi_cfg.page_size;
		op_mode = CNFG_AUTO_FMT_EN;
		if (op->data.ecc)
			op_mode |= CNFG_HW_ECC_EN;
		// extract the plane bit:
		// Find the highest bit set in (pagesize+oobsize).
		// Bits higher than that in op->addr are kept and sent over SPI.
		// Lower bits are used as an offset for copying data from DMA
		// bounce buffer.
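		// As an illustrative example (hypothetical format): for a
		// 2048 + 64 page, fls(2112) = 12, so mask = 0xfff; address
		// bits [11:0] become rd_offset into the bounce buffer while
		// the remaining high bits (e.g. a plane-select bit) stay in
		// op_addr and go out on the bus.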
		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
		mask = (1 << last_bit) - 1;
		rd_offset = op_addr & mask;
		op_addr &= ~mask;

		// check if we can dma to the caller memory
		if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
			buf = op->data.buf.in;
	}
	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	// command and dummy cycles
	nfi_write32(snf, SNF_RD_CTL2,
		    (dummy_clk << DATA_READ_DUMMY_S) |
			    (op->cmd.opcode << DATA_READ_CMD_S));

	// read address
	nfi_write32(snf, SNF_RD_CTL3, op_addr);

	// Set read op_mode
	if (op->data.buswidth == 4)
		rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
						   DATA_READ_MODE_X4;
	else if (op->data.buswidth == 2)
		rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
						   DATA_READ_MODE_X2;
	else
		rd_mode = DATA_READ_MODE_X1;
	rd_mode <<= DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  rd_mode | DATARD_CUSTOM_EN);

	// Set bytes to read
	rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
		   snf->nfi_cfg.nsectors;
	nfi_write32(snf, SNF_MISC_CTL2,
		    (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);

	// NFI read prepare
	nfi_write16(snf, NFI_CNFG,
		    (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
			    CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);

	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));

	buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(snf->dev, buf_dma);
	if (ret) {
		dev_err(snf->dev, "DMA mapping failed.\n");
		goto cleanup;
	}
	nfi_write32(snf, NFI_STRADDR, buf_dma);
	if (op->data.ecc) {
		snf->ecc_cfg->op = ECC_DECODE;
		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
		if (ret)
			goto cleanup_dma;
	}
	// Prepare for custom read interrupt
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	reinit_completion(&snf->op_done);

	// Trigger NFI into custom mode
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	// Start DMA read
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	if (!wait_for_completion_timeout(
		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
		dev_err(snf->dev, "DMA timed out for reading from cache.\n");
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	// Wait for BUS_SEC_CNTR returning expected value
	ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup2;
	}

	// Wait for bus becoming idle
	ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
		goto cleanup2;
	}

	if (op->data.ecc) {
		ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
		if (ret) {
			dev_err(snf->dev, "wait ecc done timeout\n");
			goto cleanup2;
		}
		// save status before disabling ecc
		mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
				  snf->nfi_cfg.nsectors);
	}

	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);

	if (snf->autofmt) {
		mtk_snand_read_fdm(snf, buf_fdm);
		if (snf->caps->bbm_swap) {
			mtk_snand_bm_swap(snf, buf);
			mtk_snand_fdm_bm_swap(snf);
		}
	}

	// copy data back
	if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
		memset(op->data.buf.in, 0xff, op->data.nbytes);
		snf->ecc_stats.bitflips = 0;
		snf->ecc_stats.failed = 0;
		snf->ecc_stats.corrected = 0;
	} else {
		if (buf == op->data.buf.in) {
			u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
			u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;

			if (req_left)
				memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
				       buf_fdm,
				       cap_len < req_left ? cap_len : req_left);
		} else if (rd_offset < snf->buf_len) {
			u32 cap_len = snf->buf_len - rd_offset;

			if (op->data.nbytes < cap_len)
				cap_len = op->data.nbytes;
			memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
		}
	}
cleanup2:
	if (op->data.ecc)
		mtk_ecc_disable(snf->ecc);
cleanup_dma:
	// unmap dma only if any error happens. (otherwise it's done before
	// data copying)
	if (ret)
		dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
cleanup:
	// Stop read
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	// Clear SNF done flag
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	// Disable interrupt
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
	return ret;
}

static int mtk_snand_write_page_cache(struct mtk_snand *snf,
				      const struct spi_mem_op *op)
{
	// the address part to be sent by the controller
	u32 op_addr = op->addr.val;
	// where to start copying data from bounce buffer
	u32 wr_offset = 0;
	u32 op_mode = 0;
	int ret = 0;
	u32 wr_mode = 0;
	u32 dma_len = snf->buf_len;
	u32 wr_bytes, val;
	size_t cap_len;
	dma_addr_t buf_dma;

	if (snf->autofmt) {
		u32 last_bit;
		u32 mask;

		dma_len = snf->nfi_cfg.page_size;
		op_mode = CNFG_AUTO_FMT_EN;
		if (op->data.ecc)
			op_mode |= CNFG_HW_ECC_EN;

		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
		mask = (1 << last_bit) - 1;
		wr_offset = op_addr & mask;
		op_addr &= ~mask;
	}
	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	if (wr_offset)
		memset(snf->buf, 0xff, wr_offset);

	cap_len = snf->buf_len - wr_offset;
	if (op->data.nbytes < cap_len)
		cap_len = op->data.nbytes;
	memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
	if (snf->autofmt) {
		if (snf->caps->bbm_swap) {
			mtk_snand_fdm_bm_swap(snf);
			mtk_snand_bm_swap(snf, snf->buf);
		}
		mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
	}

	// Command
	nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));

	// write address
	nfi_write32(snf, SNF_PG_CTL2, op_addr);

	// Set write load mode
	if (op->data.buswidth == 4)
		wr_mode = PG_LOAD_X4_EN;

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
		  wr_mode | PG_LOAD_CUSTOM_EN);

	// Set bytes to write
	wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
		   snf->nfi_cfg.nsectors;
	nfi_write32(snf, SNF_MISC_CTL2,
		    (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);

	// NFI write prepare
	nfi_write16(snf, NFI_CNFG,
		    (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
			    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);

	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
	buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(snf->dev, buf_dma);
	if (ret) {
		dev_err(snf->dev, "DMA mapping failed.\n");
		goto cleanup;
	}
	nfi_write32(snf, NFI_STRADDR, buf_dma);
	if (op->data.ecc) {
		snf->ecc_cfg->op = ECC_ENCODE;
		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
		if (ret)
			goto cleanup_dma;
	}
	// Prepare for custom write interrupt
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	reinit_completion(&snf->op_done);

	// Trigger NFI into custom mode
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	// Start DMA write
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	if (!wait_for_completion_timeout(
		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
		dev_err(snf->dev, "DMA timed out for program load.\n");
		ret = -ETIMEDOUT;
		goto cleanup_ecc;
	}

	// Wait for NFI_SEC_CNTR returning expected value
	ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
				 SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");

cleanup_ecc:
	if (op->data.ecc)
		mtk_ecc_disable(snf->ecc);
cleanup_dma:
	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
cleanup:
	// Stop write
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	// Clear SNF done flag
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	// Disable interrupt
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

/**
 * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
 * @op: spi-mem op to check
 *
 * Check whether op can be executed with read_from_cache or program_load
 * mode in the controller.
 * This controller can execute typical Read From Cache and Program Load
 * instructions found on SPI-NAND with 2-byte address.
 * DTR and cmd buswidth & nbytes should be checked before calling this.
 *
 * Return: true if the op matches the instruction template
 */
static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
{
	if (op->addr.nbytes != 2)
		return false;

	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
	    op->addr.buswidth != 4)
		return false;

	// match read from page instructions
	if (op->data.dir == SPI_MEM_DATA_IN) {
		// check dummy cycle first
		if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
		    DATA_READ_MAX_DUMMY)
			return false;
		// quad io / quad out
		if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
		    op->data.buswidth == 4)
			return true;

		// dual io / dual out
		if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
		    op->data.buswidth == 2)
			return true;

		// standard spi
		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
			return true;
	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
		// check dummy cycle first
		if (op->dummy.nbytes)
			return false;
		// program load quad out
		if (op->addr.buswidth == 1 && op->data.buswidth == 4)
			return true;
		// standard spi
		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
			return true;
	}
	return false;
}

static bool mtk_snand_supports_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;
	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
		return false;
	if (mtk_snand_is_page_ops(op))
		return true;
	return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
		(op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
		(op->data.nbytes == 0 || op->data.buswidth == 1));
}

static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
	// page ops transfer size must be exactly ((sector_size + spare_size) *
	// nsectors). Limit the op size if the caller requests more than that.
	// exec_op will read more than needed and discard the leftover if the
	// caller requests less data.
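	// For instance (hypothetical format): with 512-byte sectors, a spare
	// size of 16 and 4 sectors per page, a page op is capped at
	// (512 + 16) * 4 = 2112 bytes.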
	if (mtk_snand_is_page_ops(op)) {
		size_t l;
		// skip adjust_op_size for page ops
		if (ms->autofmt)
			return 0;
		l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
		l *= ms->nfi_cfg.nsectors;
		if (op->data.nbytes > l)
			op->data.nbytes = l;
	} else {
		size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (hl >= SNF_GPRAM_SIZE)
			return -EOPNOTSUPP;
		if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
			op->data.nbytes = SNF_GPRAM_SIZE - hl;
	}
	return 0;
}

static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);

	dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
		op->addr.val, op->addr.buswidth, op->addr.nbytes,
		op->data.buswidth, op->data.nbytes);
	if (mtk_snand_is_page_ops(op)) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			return mtk_snand_read_page_cache(ms, op);
		else
			return mtk_snand_write_page_cache(ms, op);
	} else {
		return mtk_snand_mac_io(ms, op);
	}
}

static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
	.adjust_op_size = mtk_snand_adjust_op_size,
	.supports_op = mtk_snand_supports_op,
	.exec_op = mtk_snand_exec_op,
};

static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
	.ecc = true,
};

static irqreturn_t mtk_snand_irq(int irq, void *id)
{
	struct mtk_snand *snf = id;
	u32 sta, ien;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return IRQ_NONE;

	nfi_write32(snf, NFI_INTR_EN, 0);
	complete(&snf->op_done);
	return IRQ_HANDLED;
}

static const struct of_device_id mtk_snand_ids[] = {
	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
	{ .compatible = "mediatek,mt7986-snand", .data = &mt7986_snand_caps },
	{},
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);

static int mtk_snand_enable_clk(struct mtk_snand *ms)
{
	int ret;

	ret = clk_prepare_enable(ms->nfi_clk);
	if (ret) {
		dev_err(ms->dev, "unable to enable nfi clk\n");
		return ret;
	}
	ret = clk_prepare_enable(ms->pad_clk);
	if (ret) {
		dev_err(ms->dev, "unable to enable pad clk\n");
		goto err1;
	}
	ret = clk_prepare_enable(ms->nfi_hclk);
	if (ret) {
		dev_err(ms->dev, "unable to enable nfi hclk\n");
		goto err2;
	}

	return 0;

err2:
	clk_disable_unprepare(ms->pad_clk);
err1:
	clk_disable_unprepare(ms->nfi_clk);
	return ret;
}

static void mtk_snand_disable_clk(struct mtk_snand *ms)
{
	clk_disable_unprepare(ms->nfi_hclk);
	clk_disable_unprepare(ms->pad_clk);
	clk_disable_unprepare(ms->nfi_clk);
}

static int mtk_snand_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *dev_id;
	struct spi_controller *ctlr;
	struct mtk_snand *ms;
	unsigned long spi_freq;
	u32 val = 0;
	int ret;

	dev_id = of_match_node(mtk_snand_ids, np);
	if (!dev_id)
		return -EINVAL;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
	if (!ctlr)
		return -ENOMEM;
	platform_set_drvdata(pdev, ctlr);

	ms = spi_controller_get_devdata(ctlr);

	ms->ctlr = ctlr;
	ms->caps = dev_id->data;

	ms->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(ms->ecc))
		return PTR_ERR(ms->ecc);
	else if (!ms->ecc)
		return -ENODEV;

	ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ms->nfi_base)) {
		ret = PTR_ERR(ms->nfi_base);
		goto release_ecc;
	}

	ms->dev = &pdev->dev;

	ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
	if (IS_ERR(ms->nfi_clk)) {
		ret = PTR_ERR(ms->nfi_clk);
		dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
		goto release_ecc;
	}

	ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
	if (IS_ERR(ms->pad_clk)) {
		ret = PTR_ERR(ms->pad_clk);
		dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
		goto release_ecc;
	}

	ms->nfi_hclk = devm_clk_get_optional(&pdev->dev, "nfi_hclk");
	if (IS_ERR(ms->nfi_hclk)) {
		ret = PTR_ERR(ms->nfi_hclk);
		dev_err(&pdev->dev, "unable to get nfi_hclk, err = %d\n", ret);
		goto release_ecc;
	}

	ret = mtk_snand_enable_clk(ms);
	if (ret)
		goto release_ecc;

	init_completion(&ms->op_done);

	ms->irq = platform_get_irq(pdev, 0);
	if (ms->irq < 0) {
		ret = ms->irq;
		goto disable_clk;
	}
	ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
			       "mtk-snand", ms);
	if (ret) {
		dev_err(ms->dev, "failed to request snfi irq\n");
		goto disable_clk;
	}

	ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(ms->dev, "failed to set dma mask\n");
		goto disable_clk;
	}

	// switch to SNFI mode
	nfi_write32(ms, SNF_CFG, SPI_MODE);

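	// The optional rx-sample-delay-ns value is scaled by
	// SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL before being written to
	// SFCK_SAM_DLY below; e.g. a (hypothetical) 2 ns delay becomes a
	// field value of 2 * 47 / 9 = 10.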
	ret = of_property_read_u32(np, "rx-sample-delay-ns", &val);
	if (!ret)
		nfi_rmw32(ms, SNF_DLY_CTL3, SFCK_SAM_DLY,
			  val * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL);

	ret = of_property_read_u32(np, "mediatek,rx-latch-latency-ns", &val);
	if (!ret) {
		spi_freq = clk_get_rate(ms->pad_clk);
		val = DIV_ROUND_CLOSEST(val, NSEC_PER_SEC / spi_freq);
		nfi_rmw32(ms, SNF_MISC_CTL, DATA_READ_LATCH_LAT,
			  val << DATA_READ_LATCH_LAT_S);
	}

	// setup an initial page format for ops matching page_cache_op template
	// before ECC is called.
	ret = mtk_snand_setup_pagefmt(ms, SZ_2K, SZ_64);
	if (ret) {
		dev_err(ms->dev, "failed to set initial page format\n");
		goto disable_clk;
	}

	// setup ECC engine
	ms->ecc_eng.dev = &pdev->dev;
	ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
	ms->ecc_eng.priv = ms;

	ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
	if (ret) {
		dev_err(&pdev->dev, "failed to register ecc engine.\n");
		goto disable_clk;
	}

	ctlr->num_chipselect = 1;
	ctlr->mem_ops = &mtk_snand_mem_ops;
	ctlr->mem_caps = &mtk_snand_mem_caps;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = pdev->dev.of_node;
	ret = spi_register_controller(ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed.\n");
		goto disable_clk;
	}

	return 0;
disable_clk:
	mtk_snand_disable_clk(ms);
release_ecc:
	mtk_ecc_release(ms->ecc);
	return ret;
}

static int mtk_snand_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct mtk_snand *ms = spi_controller_get_devdata(ctlr);

	spi_unregister_controller(ctlr);
	mtk_snand_disable_clk(ms);
	mtk_ecc_release(ms->ecc);
	kfree(ms->buf);
	return 0;
}

static struct platform_driver mtk_snand_driver = {
	.probe = mtk_snand_probe,
	.remove = mtk_snand_remove,
	.driver = {
		.name = "mtk-snand",
		.of_match_table = mtk_snand_ids,
	},
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");