// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell NAND flash controller driver
 *
 * Copyright (C) 2017 Marvell
 * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
 *
 * This NAND controller driver handles two versions of the hardware,
 * one called NFCv1, available on PXA SoCs, and the other called NFCv2,
 * available on Armada SoCs.
 *
 * The main visible difference is that NFCv1 only has Hamming ECC
 * capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA
 * is not used with NFCv2.
 *
 * The ECC layouts are depicted in detail in Marvell AN-379, but here
 * is a brief description.
 *
 * When using Hamming, the data is split into 512B chunks (either 1, 2
 * or 4) and each chunk has its own 6B ECC "digest" at the beginning
 * of the OOB area, followed by the remaining free OOB bytes (also
 * called "spare" bytes in the driver). This engine corrects up to 1
 * bit per chunk and reliably detects an error if there are at most 2
 * bitflips. Here is the page layout used by the controller when
 * Hamming is chosen:
 *
 * +-------------------------------------------------------------+
 * | Data 1 | ... | Data N | ECC 1 | ... | ECC N | Free OOB bytes |
 * +-------------------------------------------------------------+
 *
 * When using the BCH engine, there are N identical (data + free OOB +
 * ECC) sections and potentially an extra one to deal with
 * configurations where the chosen (data + free OOB + ECC) sizes do
 * not align with the page (data + OOB) size. ECC bytes are always
 * 30B per ECC chunk. Here is the page layout used by the controller
 * when BCH is chosen:
 *
 * +-----------------------------------------
 * | Data 1 | Free OOB bytes 1 | ECC 1 | ...
 * +-----------------------------------------
 *
 *      -------------------------------------------
 *       ... | Data N | Free OOB bytes N | ECC N |
 *      -------------------------------------------
 *
 *           --------------------------------------------+
 *            Last Data | Last Free OOB bytes | Last ECC |
 *           --------------------------------------------+
 *
 * In both cases, the layout seen by the user is always: all data
 * first, then all free OOB bytes and finally all ECC bytes. With BCH,
 * ECC bytes are 30B long and are padded with 0xFF to align on 32
 * bytes.
 *
 * The controller has certain limitations that are handled by the
 * driver:
 *   - It can only read 2k at a time. To overcome this limitation, the
 *     driver issues data cycles on the bus, without issuing new
 *     CMD + ADDR cycles. The Marvell term is "naked" operations.
 *   - The ECC strength in BCH mode cannot be tuned. It is fixed at 16
 *     bits. What can be tuned is the ECC block size, as long as it
 *     stays between 512B and 2kiB. It is usually chosen based on the
 *     chip ECC requirements. For instance, using 2kiB ECC chunks
 *     provides 4b/512B correctability.
 *   - The controller will always treat data bytes, free OOB bytes
 *     and ECC bytes in that order, no matter what the real layout is
 *     (which is usually all data then all OOB bytes). The
 *     marvell_nfc_layouts array below contains the currently
 *     supported layouts.
 *   - Because of these weird layouts, the Bad Block Markers can be
 *     located in the data section. In this case, the NAND_BBT_NO_OOB_BBM
 *     option must be set to prevent scanning/writing bad block
 *     markers.
 */
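/*
 * Worked example (added for illustration, not from AN-379): with
 * Hamming, a 2048B page is split into four 512B chunks, each with its
 * own 6B digest, i.e. 4 * 6 = 24 ECC bytes in total; this matches the
 * ecc_bytes = 24 entry of the 2048B/1-bit layout in the
 * marvell_nfc_layouts table below.
 */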
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_platform.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
#define FIFO_DEPTH		8
#define FIFO_REP(x)		(x / sizeof(u32))
#define BCH_SEQ_READS		(32 / FIFO_DEPTH)
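/*
 * Example (worked numbers for illustration): FIFO_REP(FIFO_DEPTH) =
 * 8 / 4 = 2, so one FIFO line is moved with two 32-bit accesses, and in
 * BCH mode BCH_SEQ_READS = 32 / 8 = 4 FIFO lines (32 bytes) are drained
 * between two RDDREQ polls (see the BCH read helpers below).
 */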
/* NFC does not support transfers of larger chunks at a time */
#define MAX_CHUNK_SIZE		2112
/* NFCv1 cannot read more than 7 bytes of ID */
#define NFCV1_READID_LEN	7
/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
#define POLL_PERIOD		0
#define POLL_TIMEOUT		100000
/* Interrupt maximum wait period in ms */
#define IRQ_TIMEOUT		1000
/* Latency in clock cycles between SoC pins and NFC logic */
#define MIN_RD_DEL_CNT		3
/* Maximum number of contiguous address cycles */
#define MAX_ADDRESS_CYC_NFCV1	5
#define MAX_ADDRESS_CYC_NFCV2	7
/* System control registers/bits to enable the NAND controller on some SoCs */
#define GENCONF_SOC_DEVICE_MUX	0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
#define GENCONF_SOC_DEVICE_MUX_NFC_DEVBUS_ARB_EN BIT(27)
#define GENCONF_CLK_GATING_CTRL	0x220
#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
#define GENCONF_ND_CLK_CTRL	0x700
#define GENCONF_ND_CLK_CTRL_EN	BIT(0)

/* NAND controller data flash control register */
#define NDCR			0x00
#define NDCR_ALL_INT		GENMASK(11, 0)
#define NDCR_CS1_CMDDM		BIT(7)
#define NDCR_CS0_CMDDM		BIT(8)
#define NDCR_RDYM		BIT(11)
#define NDCR_ND_ARB_EN		BIT(12)
#define NDCR_RA_START		BIT(15)
#define NDCR_RD_ID_CNT(x)	(min_t(unsigned int, x, 0x7) << 16)
#define NDCR_PAGE_SZ(x)		(x >= 2048 ? BIT(24) : 0)
#define NDCR_DWIDTH_M		BIT(26)
#define NDCR_DWIDTH_C		BIT(27)
#define NDCR_ND_RUN		BIT(28)
#define NDCR_DMA_EN		BIT(29)
#define NDCR_ECC_EN		BIT(30)
#define NDCR_SPARE_EN		BIT(31)
#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
				    NDCR_DWIDTH_M | NDCR_DWIDTH_C))

/* NAND interface timing parameter 0 register */
#define NDTR0			0x04
#define NDTR0_TRP(x)		((min_t(unsigned int, x, 0xF) & 0x7) << 0)
#define NDTR0_TRH(x)		(min_t(unsigned int, x, 0x7) << 3)
#define NDTR0_ETRP(x)		((min_t(unsigned int, x, 0xF) & 0x8) << 3)
#define NDTR0_SEL_NRE_EDGE	BIT(7)
#define NDTR0_TWP(x)		(min_t(unsigned int, x, 0x7) << 8)
#define NDTR0_TWH(x)		(min_t(unsigned int, x, 0x7) << 11)
#define NDTR0_TCS(x)		(min_t(unsigned int, x, 0x7) << 16)
#define NDTR0_TCH(x)		(min_t(unsigned int, x, 0x7) << 19)
#define NDTR0_RD_CNT_DEL(x)	(min_t(unsigned int, x, 0xF) << 22)
#define NDTR0_SELCNTR		BIT(26)
#define NDTR0_TADL(x)		(min_t(unsigned int, x, 0x1F) << 27)

/* NAND interface timing parameter 1 register */
#define NDTR1			0x0C
#define NDTR1_TAR(x)		(min_t(unsigned int, x, 0xF) << 0)
#define NDTR1_TWHR(x)		(min_t(unsigned int, x, 0xF) << 4)
#define NDTR1_TRHW(x)		(min_t(unsigned int, x / 16, 0x3) << 8)
#define NDTR1_PRESCALE		BIT(14)
#define NDTR1_WAIT_MODE		BIT(15)
#define NDTR1_TR(x)		(min_t(unsigned int, x, 0xFFFF) << 16)

/* NAND controller status register */
#define NDSR			0x14
#define NDSR_WRCMDREQ		BIT(0)
#define NDSR_RDDREQ		BIT(1)
#define NDSR_WRDREQ		BIT(2)
#define NDSR_CORERR		BIT(3)
#define NDSR_UNCERR		BIT(4)
#define NDSR_CMDD(cs)		BIT(8 - cs)
#define NDSR_RDY(rb)		BIT(11 + rb)
#define NDSR_ERRCNT(x)		((x >> 16) & 0x1F)

/* NAND ECC control register */
#define NDECCCTRL		0x28
#define NDECCCTRL_BCH_EN	BIT(0)

/* NAND controller data buffer register */
#define NDDB			0x40

/* NAND controller command buffer 0 register */
#define NDCB0			0x48
#define NDCB0_CMD1(x)		((x & 0xFF) << 0)
#define NDCB0_CMD2(x)		((x & 0xFF) << 8)
#define NDCB0_ADDR_CYC(x)	((x & 0x7) << 16)
#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
#define NDCB0_DBC		BIT(19)
#define NDCB0_CMD_TYPE(x)	((x & 0x7) << 21)
#define NDCB0_CSEL		BIT(24)
#define NDCB0_RDY_BYP		BIT(27)
#define NDCB0_LEN_OVRD		BIT(28)
#define NDCB0_CMD_XTYPE(x)	((x & 0x7) << 29)

/* NAND controller command buffer 1 register */
#define NDCB1			0x4C
#define NDCB1_COLS(x)		((x & 0xFFFF) << 0)
#define NDCB1_ADDRS_PAGE(x)	(x << 16)

/* NAND controller command buffer 2 register */
#define NDCB2			0x50
#define NDCB2_ADDR5_PAGE(x)	(((x >> 16) & 0xFF) << 0)
#define NDCB2_ADDR5_CYC(x)	((x & 0xFF) << 0)

/* NAND controller command buffer 3 register */
#define NDCB3			0x54
#define NDCB3_ADDR6_CYC(x)	((x & 0xFF) << 16)
#define NDCB3_ADDR7_CYC(x)	((x & 0xFF) << 24)

/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
#define TYPE_READ		0
#define TYPE_WRITE		1
#define TYPE_ERASE		2
#define TYPE_READ_ID		3
#define TYPE_STATUS		4
#define TYPE_RESET		5
#define TYPE_NAKED_CMD		6
#define TYPE_NAKED_ADDR		7
#define TYPE_MASK		7
#define XTYPE_MONOLITHIC_RW	0
#define XTYPE_LAST_NAKED_RW	1
#define XTYPE_FINAL_COMMAND	3
#define XTYPE_READ		4
#define XTYPE_WRITE_DISPATCH	4
#define XTYPE_NAKED_RW		5
#define XTYPE_COMMAND_DISPATCH	6
#define XTYPE_MASK		7

/**
 * struct marvell_hw_ecc_layout - layout of Marvell ECC
 *
 * The Marvell ECC engine works differently than the others: in order to limit
 * the size of the IP, hardware engineers chose to set a fixed strength of 16
 * bits per subpage. Depending on the strength needed by the NAND chip, a
 * particular layout mixing data/spare/ecc is defined, with a possible last
 * chunk smaller than the others.
 *
 * @writesize:		Full page size on which the layout applies
 * @chunk:		Desired ECC chunk size on which the layout applies
 * @strength:		Desired ECC strength (per chunk size bytes) on which the
 *			layout applies
 * @nchunks:		Total number of chunks
 * @full_chunk_cnt:	Number of full-sized chunks, which is the number of
 *			repetitions of the pattern:
 *			(data_bytes + spare_bytes + ecc_bytes).
 * @data_bytes:		Number of data bytes per chunk
 * @spare_bytes:	Number of spare bytes per chunk
 * @ecc_bytes:		Number of ecc bytes per chunk
 * @last_data_bytes:	Number of data bytes in the last chunk
 * @last_spare_bytes:	Number of spare bytes in the last chunk
 * @last_ecc_bytes:	Number of ecc bytes in the last chunk
 */
struct marvell_hw_ecc_layout {
	/* Constraints */
	int writesize;
	int chunk;
	int strength;
	/* Corresponding layout */
	int nchunks;
	int full_chunk_cnt;
	int data_bytes;
	int spare_bytes;
	int ecc_bytes;
	int last_data_bytes;
	int last_spare_bytes;
	int last_ecc_bytes;
};

#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb)	\
	{								\
		.writesize = ws,					\
		.chunk = dc,						\
		.strength = ds,						\
		.nchunks = nc,						\
		.full_chunk_cnt = fcc,					\
		.data_bytes = db,					\
		.spare_bytes = sb,					\
		.ecc_bytes = eb,					\
		.last_data_bytes = ldb,					\
		.last_spare_bytes = lsb,				\
		.last_ecc_bytes = leb,					\
	}

/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
	MARVELL_LAYOUT(  512,  512,  1,  1,  1,  512,  8,  8,    0,   0,  0),
	MARVELL_LAYOUT( 2048,  512,  1,  1,  1, 2048, 40, 24,    0,   0,  0),
	MARVELL_LAYOUT( 2048,  512,  4,  1,  1, 2048, 32, 30,    0,   0,  0),
	MARVELL_LAYOUT( 2048,  512,  8,  2,  1, 1024,  0, 30, 1024,  32, 30),
	MARVELL_LAYOUT( 2048,  512,  8,  2,  1, 1024,  0, 30, 1024,  64, 30),
	MARVELL_LAYOUT( 2048,  512, 12,  3,  2,  704,  0, 30,  640,   0, 30),
	MARVELL_LAYOUT( 2048,  512, 16,  5,  4,  512,  0, 30,    0,  32, 30),
	MARVELL_LAYOUT( 4096,  512,  4,  2,  2, 2048, 32, 30,    0,   0,  0),
	MARVELL_LAYOUT( 4096,  512,  8,  5,  4, 1024,  0, 30,    0,  64, 30),
	MARVELL_LAYOUT( 4096,  512, 12,  6,  5,  704,  0, 30,  576,  32, 30),
	MARVELL_LAYOUT( 4096,  512, 16,  9,  8,  512,  0, 30,    0,  32, 30),
	MARVELL_LAYOUT( 8192,  512,  4,  4,  4, 2048,  0, 30,    0,   0,  0),
	MARVELL_LAYOUT( 8192,  512,  8,  9,  8, 1024,  0, 30,    0, 160, 30),
	MARVELL_LAYOUT( 8192,  512, 12, 12, 11,  704,  0, 30,  448,  64, 30),
	MARVELL_LAYOUT( 8192,  512, 16, 17, 16,  512,  0, 30,    0,  32, 30),
};
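/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * bytes a layout consumes are the full chunks plus the (possibly smaller)
 * last chunk, which together must fit within the page data plus OOB size.
 */
static inline int marvell_layout_total_bytes(const struct marvell_hw_ecc_layout *lt)
{
	/* Full chunks all share the same data/spare/ECC split... */
	int full = lt->full_chunk_cnt * (lt->data_bytes + lt->spare_bytes +
					 lt->ecc_bytes);
	/* ...while the trailing chunk, when present, has its own sizes. */
	int last = lt->last_data_bytes + lt->last_spare_bytes +
		   lt->last_ecc_bytes;

	return full + last;
}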
/**
 * struct marvell_nand_chip_sel - CS line description
 *
 * The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
 * is made by a field in NDCB0 register, and in another field in NDCB2
 * register. The datasheet describes the logic with an error: the ADDR5 field
 * is once declared at the beginning of NDCB2, and another time at its end.
 * Because the ADDR5 field of NDCB2 may be used by other bytes, it would be
 * more logical to use the last bits of this field instead of the first ones.
 *
 * @cs:			Wanted CE lane.
 * @ndcb0_csel:		Value of the NDCB0 register with or without the flag
 *			selecting the wanted CE lane. This is set once when
 *			the Device Tree is probed.
 * @rb:			Ready/Busy pin for the flash chip
 */
struct marvell_nand_chip_sel {
	unsigned int cs;
	u32 ndcb0_csel;
	unsigned int rb;
};

/**
 * struct marvell_nand_chip - stores NAND chip device related information
 *
 * @chip:		Base NAND chip structure
 * @node:		Used to store NAND chips into a list
 * @layout:		NAND layout when using hardware ECC
 * @ndcr:		Controller register value for this NAND chip
 * @ndtr0:		Timing registers 0 value for this NAND chip
 * @ndtr1:		Timing registers 1 value for this NAND chip
 * @addr_cyc:		Amount of cycles needed to pass column address
 * @selected_die:	Current active CS
 * @nsels:		Number of CS lines required by the NAND chip
 * @sels:		Array of CS lines descriptions
 */
struct marvell_nand_chip {
	struct nand_chip chip;
	struct list_head node;
	const struct marvell_hw_ecc_layout *layout;
	u32 ndcr;
	u32 ndtr0;
	u32 ndtr1;
	int addr_cyc;
	int selected_die;
	unsigned int nsels;
	struct marvell_nand_chip_sel sels[];
};

static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
{
	return container_of(chip, struct marvell_nand_chip, chip);
}

static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
							*nand)
{
	return &nand->sels[nand->selected_die];
}
/**
 * struct marvell_nfc_caps - NAND controller capabilities for distinction
 *                           between compatible strings
 *
 * @max_cs_nb:		Number of Chip Select lines available
 * @max_rb_nb:		Number of Ready/Busy lines available
 * @need_system_controller: Indicates if the SoC needs to have access to the
 *			system controller (ie. to enable the NAND controller)
 * @legacy_of_bindings:	Indicates if DT parsing must be done using the old
 *			fashioned way
 * @is_nfcv2:		NFCv2 has numerous enhancements compared to NFCv1, ie.
 *			the BCH error detection and correction algorithm, and
 *			the NDCB3 register that has been added
 * @use_dma:		Use DMA for data transfers
 * @max_mode_number:	Maximum timing mode supported by the controller
 */
struct marvell_nfc_caps {
	unsigned int max_cs_nb;
	unsigned int max_rb_nb;
	bool need_system_controller;
	bool legacy_of_bindings;
	bool is_nfcv2;
	bool use_dma;
	unsigned int max_mode_number;
};

/**
 * struct marvell_nfc - stores Marvell NAND controller information
 *
 * @controller:		Base controller structure
 * @dev:		Parent device (used to print error messages)
 * @regs:		NAND controller registers
 * @core_clk:		Core clock
 * @reg_clk:		Registers clock
 * @complete:		Completion object to wait for NAND controller events
 * @assigned_cs:	Bitmask describing already assigned CS lines
 * @chips:		List containing all the NAND chips attached to
 *			this NAND controller
 * @selected_chip:	Currently selected target chip
 * @caps:		NAND controller capabilities for each compatible string
 * @use_dma:		Whether DMA is used
 * @dma_chan:		DMA channel (NFCv1 only)
 * @dma_buf:		32-bit aligned buffer for DMA transfers (NFCv1 only)
 */
struct marvell_nfc {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk *reg_clk;
	struct completion complete;
	unsigned long assigned_cs;
	struct list_head chips;
	struct nand_chip *selected_chip;
	const struct marvell_nfc_caps *caps;

	/* DMA (NFCv1 only) */
	bool use_dma;
	struct dma_chan *dma_chan;
	u8 *dma_buf;
};

static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct marvell_nfc, controller);
}

/**
 * struct marvell_nfc_timings - NAND controller timings expressed in NAND
 *                              Controller clock cycles
 *
 * @tRP:		ND_nRE pulse width
 * @tRH:		ND_nRE high duration
 * @tWP:		ND_nWE pulse time
 * @tWH:		ND_nWE high duration
 * @tCS:		Enable signal setup time
 * @tCH:		Enable signal hold time
 * @tADL:		Address to write data delay
 * @tAR:		ND_ALE low to ND_nRE low delay
 * @tWHR:		ND_nWE high to ND_nRE low for status read
 * @tRHW:		ND_nRE high duration, read to write delay
 * @tR:			ND_nWE high to ND_nRE low for read
 */
struct marvell_nfc_timings {
	/* NDTR0 fields */
	unsigned int tRP;
	unsigned int tRH;
	unsigned int tWP;
	unsigned int tWH;
	unsigned int tCS;
	unsigned int tCH;
	unsigned int tADL;
	/* NDTR1 fields */
	unsigned int tAR;
	unsigned int tWHR;
	unsigned int tRHW;
	unsigned int tR;
};

/**
 * TO_CYCLES() - Derives a duration in numbers of clock cycles.
 *
 * @ps: Duration in pico-seconds
 * @period_ns: Clock period in nano-seconds
 *
 * Convert the duration in nano-seconds, then divide by the period and
 * return the number of clock periods.
 */
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
						     period_ns))
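/*
 * Worked example (for illustration): with a 10 ns controller clock, a
 * 25 ns (25000 ps) tRP gives TO_CYCLES(25000, 10) =
 * DIV_ROUND_UP(25, 10) = 3 cycles, which would then be programmed with
 * NDTR0_TRP(3).
 */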
/**
 * struct marvell_nfc_op - filled during the parsing of the ->exec_op()
 *                         subop subset of instructions.
 *
 * @ndcb:		Array of values written to NDCBx registers
 * @cle_ale_delay_ns:	Optional delay after the last CMD or ADDR cycle
 * @rdy_timeout_ms:	Timeout for waits on Ready/Busy pin
 * @rdy_delay_ns:	Optional delay after waiting for the RB pin
 * @data_delay_ns:	Optional delay after the data xfer
 * @data_instr_idx:	Index of the data instruction in the subop
 * @data_instr:		Pointer to the data instruction in the subop
 */
struct marvell_nfc_op {
	u32 ndcb[4];
	unsigned int cle_ale_delay_ns;
	unsigned int rdy_timeout_ms;
	unsigned int rdy_delay_ns;
	unsigned int data_delay_ns;
	unsigned int data_instr_idx;
	const struct nand_op_instr *data_instr;
};

/*
 * Internal helper to conditionally apply a delay (from the above structure,
 * most of the time).
 */
static void cond_delay(unsigned int ns)
{
	if (!ns)
		return;

	if (ns < 10000)
		ndelay(ns);
	else
		udelay(DIV_ROUND_UP(ns, 1000));
}

/*
 * The controller has many flags that could generate interrupts, most of them
 * are disabled and polling is used. For the very slow signals, using
 * interrupts may reduce the CPU load.
 */
static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	/* Writing 1 disables the interrupt */
	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg | int_mask, nfc->regs + NDCR);
}

static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	/* Writing 0 enables the interrupt */
	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
}

static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDSR);
	writel_relaxed(int_mask, nfc->regs + NDSR);

	return reg & int_mask;
}

static void marvell_nfc_force_byte_access(struct nand_chip *chip,
					  bool force_8bit)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr;

	/*
	 * Callers of this function do not verify if the NAND is using a 16-bit
	 * or an 8-bit bus for normal operations, so we need to take care of
	 * that here by leaving the configuration unchanged if the NAND does
	 * not have the NAND_BUSWIDTH_16 flag set.
	 */
	if (!(chip->options & NAND_BUSWIDTH_16))
		return;

	ndcr = readl_relaxed(nfc->regs + NDCR);

	if (force_8bit)
		ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
	else
		ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;

	writel_relaxed(ndcr, nfc->regs + NDCR);
}
static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 val;
	int ret;

	/*
	 * The command is being processed, wait for the ND_RUN bit to be
	 * cleared by the NFC. If not, we must clear it by hand.
	 */
	ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
					 (val & NDCR_ND_RUN) == 0,
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
		return ret;
	}

	return 0;
}

/*
 * Any time a command has to be sent to the controller, the following sequence
 * has to be followed:
 * - call marvell_nfc_prepare_cmd()
 *      -> activate the ND_RUN bit that will kind of 'start a job'
 *      -> wait for the signal indicating the NFC is waiting for a command
 * - send the command (cmd and address cycles)
 * - eventually send or receive the data
 * - call marvell_nfc_end_cmd() with the corresponding flag
 *      -> wait for the flag to be triggered or cancel the job with a timeout
 *
 * The following helpers are here to factorize the code a bit so that
 * specialized functions responsible for executing the actual NAND
 * operations do not have to replicate the same code blocks.
 */
static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr, val;
	int ret;

	/* Poll ND_RUN and clear NDSR before issuing any command */
	ret = marvell_nfc_wait_ndrun(chip);
	if (ret) {
		dev_err(nfc->dev, "Last operation did not succeed\n");
		return ret;
	}

	ndcr = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);

	/* Assert ND_RUN bit and wait the NFC to be ready */
	writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
					 val & NDSR_WRCMDREQ,
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
		return -ETIMEDOUT;
	}

	/* Command may be written, clear WRCMDREQ status bit */
	writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);

	return 0;
}

static void marvell_nfc_send_cmd(struct nand_chip *chip,
				 struct marvell_nfc_op *nfc_op)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

	dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
		"NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
		(u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
		nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);

	writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
		       nfc->regs + NDCB0);
	writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
	writel(nfc_op->ndcb[2], nfc->regs + NDCB0);

	/*
	 * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
	 * fields are used (only available on NFCv2).
	 */
	if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
	    NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
		if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
			writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
	}
}

static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
			       const char *label)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
					 val & flag,
					 POLL_PERIOD, POLL_TIMEOUT);

	if (ret) {
		dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
			label, val);
		if (nfc->dma_chan)
			dmaengine_terminate_all(nfc->dma_chan);
		return ret;
	}

	/*
	 * DMA function uses this helper to poll on CMDD bits without wanting
	 * them to be cleared.
	 */
	if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
		return 0;

	writel_relaxed(flag, nfc->regs + NDSR);

	return 0;
}

static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);

	return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
}

static int marvell_nfc_poll_status(struct marvell_nfc *nfc, u32 mask,
				   u32 expected_val, unsigned long timeout_ms)
{
	unsigned long limit;
	u32 st;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		st = readl_relaxed(nfc->regs + NDSR);
		if (st & NDSR_RDY(1))
			st |= NDSR_RDY(0);

		if ((st & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));

	return -ETIMEDOUT;
}

static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 pending;
	int ret;

	/* Timeout is expressed in ms */
	if (!timeout_ms)
		timeout_ms = IRQ_TIMEOUT;

	if (mtd->oops_panic_write) {
		ret = marvell_nfc_poll_status(nfc, NDSR_RDY(0),
					      NDSR_RDY(0),
					      timeout_ms);
	} else {
		init_completion(&nfc->complete);

		marvell_nfc_enable_int(nfc, NDCR_RDYM);
		ret = wait_for_completion_timeout(&nfc->complete,
						  msecs_to_jiffies(timeout_ms));
		marvell_nfc_disable_int(nfc, NDCR_RDYM);
	}
	pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));

	/*
	 * In case the interrupt was not served in the required time frame,
	 * check if the ISR was not served or if something went actually wrong.
	 */
	if (!ret && !pending) {
		dev_err(nfc->dev, "Timeout waiting for RB signal\n");
		return -ETIMEDOUT;
	}

	return 0;
}
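/*
 * Minimal sketch of the sequence described above (hypothetical helper,
 * shown only for illustration; the real read/write paths below add the
 * data transfer step between send and completion):
 */
static int __maybe_unused marvell_nfc_sequence_sketch(struct nand_chip *chip,
						      struct marvell_nfc_op *nfc_op)
{
	int ret;

	/* Assert ND_RUN and wait for the WRCMDREQ signal */
	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	/* Push the command and address cycles through NDCB0..NDCB3 */
	marvell_nfc_send_cmd(chip, nfc_op);

	/* ... data would be sent or received here ... */

	/* Wait for the command-done flag of the selected CS */
	return marvell_nfc_wait_cmdd(chip);
}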
static void marvell_nfc_select_target(struct nand_chip *chip,
				      unsigned int die_nr)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr_generic;

	/*
	 * Reset the NDCR register to a clean state for this particular chip,
	 * also clear ND_RUN bit.
	 */
	ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
		       NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
	writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);

	/* Also reset the interrupt status register */
	marvell_nfc_clear_int(nfc, NDCR_ALL_INT);

	if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
		return;

	writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
	writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);

	nfc->selected_chip = chip;
	marvell_nand->selected_die = die_nr;
}

static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
{
	struct marvell_nfc *nfc = dev_id;
	u32 st = readl_relaxed(nfc->regs + NDSR);
	u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;

	/*
	 * RDY interrupt mask is one bit in NDCR while there are two status
	 * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
	 */
	if (st & NDSR_RDY(1))
		st |= NDSR_RDY(0);

	if (!(st & ien))
		return IRQ_NONE;

	marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);

	if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
		complete(&nfc->complete);

	return IRQ_HANDLED;
}

/* HW ECC related functions */
static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr = readl_relaxed(nfc->regs + NDCR);

	if (!(ndcr & NDCR_ECC_EN)) {
		writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);

		/*
		 * When enabling BCH, set threshold to 0 to always know the
		 * number of corrected bitflips.
		 */
		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
	}
}

static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr = readl_relaxed(nfc->regs + NDCR);

	if (ndcr & NDCR_ECC_EN) {
		writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			writel_relaxed(0, nfc->regs + NDECCCTRL);
	}
}

/* DMA related helpers */
static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
}

static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
}
/* Read/write PIO/DMA accessors */
static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
				     enum dma_data_direction direction,
				     unsigned int len)
{
	unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret;

	marvell_nfc_enable_dma(nfc);
	/* Prepare the DMA transfer */
	sg_init_one(&sg, nfc->dma_buf, dma_len);
	ret = dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
	if (!ret) {
		dev_err(nfc->dev, "Could not map DMA S/G list\n");
		return -ENXIO;
	}

	tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
				     direction == DMA_FROM_DEVICE ?
				     DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
		dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
		return -ENXIO;
	}

	/* Do the task and wait for it to finish */
	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		return -EIO;

	dma_async_issue_pending(nfc->dma_chan);
	ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
	dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
	marvell_nfc_disable_dma(nfc);
	if (ret) {
		dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
			dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
		dmaengine_terminate_all(nfc->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}

static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
					unsigned int len)
{
	unsigned int last_len = len % FIFO_DEPTH;
	unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
	int i;

	for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
		ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));

	if (last_len) {
		u8 tmp_buf[FIFO_DEPTH];

		ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
		memcpy(in + last_full_offset, tmp_buf, last_len);
	}

	return 0;
}

static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
					 unsigned int len)
{
	unsigned int last_len = len % FIFO_DEPTH;
	unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
	int i;

	for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
		iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));

	if (last_len) {
		u8 tmp_buf[FIFO_DEPTH];

		memcpy(tmp_buf, out + last_full_offset, last_len);
		iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
	}

	return 0;
}
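/*
 * Worked example for the PIO helpers above (illustration only): a
 * 13-byte read has last_full_offset = 8 and last_len = 5, so one full
 * 8-byte FIFO line is drained straight into the destination buffer,
 * then a second line is drained into tmp_buf from which only the
 * remaining 5 bytes are copied out.
 */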
static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
					  u8 *data, int data_len,
					  u8 *spare, int spare_len,
					  u8 *ecc, int ecc_len,
					  unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bf;

	/*
	 * Blank pages (all 0xFF) that have not been written may be recognized
	 * as bad if bitflips occur, so whenever an uncorrectable error occurs,
	 * check if the entire page (with ECC bytes) is actually blank or not.
	 */
	if (!data)
		data_len = 0;
	if (!spare)
		spare_len = 0;
	if (!ecc)
		ecc_len = 0;

	bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
					 spare, spare_len, chip->ecc.strength);
	if (bf < 0) {
		mtd->ecc_stats.failed++;
		return;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);
}

/*
 * Check if a chunk is correct or not according to the hardware ECC engine.
 * mtd->ecc_stats.corrected is updated, as well as max_bitflips, however
 * mtd->ecc_stats.failed is not: the function will instead return a non-zero
 * value indicating that a check on the emptiness of the subpage must be
 * performed before actually declaring the subpage as "corrupted".
 */
static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
					     unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	int bf = 0;
	u32 ndsr;

	ndsr = readl_relaxed(nfc->regs + NDSR);

	/* Check uncorrectable error flag */
	if (ndsr & NDSR_UNCERR) {
		writel_relaxed(ndsr, nfc->regs + NDSR);

		/*
		 * Do not increment ->ecc_stats.failed now, instead, return a
		 * non-zero value to indicate that this chunk was apparently
		 * bad, and it should be checked to see if it is empty or not.
		 * If the chunk (with ECC bytes) is not declared empty, the
		 * calling function must increment the failure count.
		 */
		return -EBADMSG;
	}

	/* Check correctable error flag */
	if (ndsr & NDSR_CORERR) {
		writel_relaxed(ndsr, nfc->regs + NDSR);

		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			bf = NDSR_ERRCNT(ndsr);
		else
			bf = 1;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);

	return 0;
}
/* Hamming read helpers */
static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
					       u8 *data_buf, u8 *oob_buf,
					       bool raw, int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_DBC |
			   NDCB0_CMD1(NAND_CMD_READ0) |
			   NDCB0_CMD2(NAND_CMD_READSTART),
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while draining FIFO (data/oob)");
	if (ret)
		return ret;

	/*
	 * Read the page then the OOB area. Unlike what is shown in current
	 * documentation, spare bytes are protected by the ECC engine, and must
	 * be at the beginning of the OOB area or running this driver on legacy
	 * systems will prevent the discovery of the BBM/BBT.
	 */
	if (nfc->use_dma) {
		marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
					  lt->data_bytes + oob_bytes);
		memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
		memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
	} else {
		marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
	}

	ret = marvell_nfc_wait_cmdd(chip);
	return ret;
}

static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
						   true, page);
}

static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
					    int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int max_bitflips = 0, ret;
	u8 *raw_buf;

	marvell_nfc_select_target(chip, chip->cur_cs);
	marvell_nfc_enable_hw_ecc(chip);
	marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
					    page);
	ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
	marvell_nfc_disable_hw_ecc(chip);

	if (!ret)
		return max_bitflips;

	/*
	 * When ECC failures are detected, check if the full page has been
	 * written or not. Ignore the failure if it is actually empty.
	 */
	raw_buf = kmalloc(full_sz, GFP_KERNEL);
	if (!raw_buf)
		return -ENOMEM;

	marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
					    lt->data_bytes, true, page);
	marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
				      &max_bitflips);
	kfree(raw_buf);

	return max_bitflips;
}

/*
 * Spare area in Hamming layouts is not protected by the ECC engine (even if
 * it appears before the ECC bytes when reading), so the ->read_oob_raw()
 * function also stands for ->read_oob().
 */
static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
{
	u8 *buf = nand_get_data_buf(chip);

	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
						   true, page);
}
/* Hamming write helpers */
static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
						const u8 *data_buf,
						const u8 *oob_buf, bool raw,
						int page)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_CMD1(NAND_CMD_SEQIN) |
			   NDCB0_CMD2(NAND_CMD_PAGEPROG) |
			   NDCB0_DBC,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/* Write the page then the OOB area */
	if (nfc->use_dma) {
		memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
		memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
		marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
					  lt->ecc_bytes + lt->spare_bytes);
	} else {
		marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
	}

	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	ret = marvell_nfc_wait_op(chip,
				  PSEC_TO_MSEC(sdr->tPROG_max));
	return ret;
}

static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
						 const u8 *buf,
						 int oob_required, int page)
{
	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						    true, page);
}

static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
					     const u8 *buf,
					     int oob_required, int page)
{
	int ret;

	marvell_nfc_select_target(chip, chip->cur_cs);
	marvell_nfc_enable_hw_ecc(chip);
	ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						   false, page);
	marvell_nfc_disable_hw_ecc(chip);

	return ret;
}

/*
 * Spare area in Hamming layouts is not protected by the ECC engine (even if
 * it appears before the ECC bytes when reading), so the ->write_oob_raw()
 * function also stands for ->write_oob().
 */
static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
						int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *buf = nand_get_data_buf(chip);

	memset(buf, 0xFF, mtd->writesize);

	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						    true, page);
}

/* BCH read helpers */
static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	u8 *oob = chip->oob_poi;
	int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
		lt->last_spare_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int chunk;

	marvell_nfc_select_target(chip, chip->cur_cs);

	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	nand_read_page_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update last chunk length */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Read data bytes */
		nand_change_read_column_op(chip, chunk * chunk_size,
					   buf + (lt->data_bytes * chunk),
					   data_len, false);

		/* Read spare bytes */
		nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
				  spare_len, false, false);

		/* Read ECC bytes */
		nand_read_data_op(chip, oob + ecc_offset +
				  (ALIGN(lt->ecc_bytes, 32) * chunk),
				  ecc_len, false, false);
	}

	return 0;
}
static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
					      u8 *data, unsigned int data_len,
					      u8 *spare, unsigned int spare_len,
					      int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int i, ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_LEN_OVRD,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
		.ndcb[3] = data_len + spare_len,
	};

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return;

	if (chunk == 0)
		nfc_op.ndcb[0] |= NDCB0_DBC |
				  NDCB0_CMD1(NAND_CMD_READ0) |
				  NDCB0_CMD2(NAND_CMD_READSTART);

	/*
	 * Trigger the monolithic read on the first chunk, then naked reads on
	 * intermediate chunks and finally a last naked read on the last chunk.
	 */
	if (chunk == 0)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
	else if (chunk < lt->nchunks - 1)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
	else
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);

	marvell_nfc_send_cmd(chip, &nfc_op);

	/*
	 * According to the datasheet, when reading from NDDB
	 * with BCH enabled, after each 32-byte read, we
	 * have to make sure that the NDSR.RDDREQ bit is set.
	 *
	 * Drain the FIFO, 8 32-bit reads at a time, and skip
	 * the polling on the last read.
	 *
	 * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
	 */
	for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (data)");
		marvell_nfc_xfer_data_in_pio(nfc, data,
					     FIFO_DEPTH * BCH_SEQ_READS);
		data += FIFO_DEPTH * BCH_SEQ_READS;
	}

	for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (OOB)");
		marvell_nfc_xfer_data_in_pio(nfc, spare,
					     FIFO_DEPTH * BCH_SEQ_READS);
		spare += FIFO_DEPTH * BCH_SEQ_READS;
	}
}
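/*
 * Worked numbers for the drain loops above (illustration only): with
 * FIFO_DEPTH = 8 and BCH_SEQ_READS = 32 / 8 = 4, each iteration polls
 * RDDREQ once and then drains FIFO_DEPTH * BCH_SEQ_READS = 32 bytes,
 * i.e. eight 32-bit reads from NDDB; a 512B data chunk therefore takes
 * 16 iterations.
 */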
static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
					    u8 *buf, int oob_required,
					    int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int data_len = lt->data_bytes, spare_len = lt->spare_bytes;
	u8 *data = buf, *spare = chip->oob_poi;
	int max_bitflips = 0;
	u32 failure_mask = 0;
	int chunk, ret;

	marvell_nfc_select_target(chip, chip->cur_cs);

	/*
	 * With BCH, OOB is not fully used (and thus not read entirely), so
	 * unexpected bytes could show up at the end of the OOB buffer if not
	 * explicitly erased.
	 */
	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	marvell_nfc_enable_hw_ecc(chip);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update length for the last chunk */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
		}

		/* Read the chunk and detect number of bitflips */
		marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
						  spare, spare_len, page);
		ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
		if (ret)
			failure_mask |= BIT(chunk);

		data += data_len;
		spare += spare_len;
	}

	marvell_nfc_disable_hw_ecc(chip);

	if (!failure_mask)
		return max_bitflips;

	/*
	 * Please note that dumping the ECC bytes during a normal read with OOB
	 * area would add a significant overhead as ECC bytes are "consumed" by
	 * the controller in normal mode and must be re-read in raw mode. To
	 * avoid degrading performance, we prefer not to include them. The
	 * user should re-read the page in raw mode if ECC bytes are required.
	 */

	/*
	 * In case there is any subpage read error, we usually re-read only ECC
	 * bytes in raw mode and check if the whole page is empty. In this
	 * case, it is normal that the ECC check failed and we just ignore the
	 * error.
	 *
	 * However, it has been empirically observed that for some layouts
	 * (e.g. 2k page, 8b strength per 512B chunk), the controller tries to
	 * correct bits and may create bitflips itself in the erased area. To
	 * overcome this strange behavior, the whole page is re-read in raw
	 * mode, not only the ECC bytes.
	 */
	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		int data_off_in_page, spare_off_in_page, ecc_off_in_page;
		int data_off, spare_off, ecc_off;
		int data_len, spare_len, ecc_len;

		/* No failure reported for this chunk, move to the next one */
		if (!(failure_mask & BIT(chunk)))
			continue;

		data_off_in_page = chunk * (lt->data_bytes + lt->spare_bytes +
					    lt->ecc_bytes);
		spare_off_in_page = data_off_in_page +
			(chunk < lt->full_chunk_cnt ? lt->data_bytes :
						      lt->last_data_bytes);
		ecc_off_in_page = spare_off_in_page +
			(chunk < lt->full_chunk_cnt ? lt->spare_bytes :
						      lt->last_spare_bytes);

		data_off = chunk * lt->data_bytes;
		spare_off = chunk * lt->spare_bytes;
		ecc_off = (lt->full_chunk_cnt * lt->spare_bytes) +
			  lt->last_spare_bytes +
			  (chunk * (lt->ecc_bytes + 2));

		data_len = chunk < lt->full_chunk_cnt ? lt->data_bytes :
							lt->last_data_bytes;
		spare_len = chunk < lt->full_chunk_cnt ? lt->spare_bytes :
							 lt->last_spare_bytes;
		ecc_len = chunk < lt->full_chunk_cnt ? lt->ecc_bytes :
						       lt->last_ecc_bytes;

		/*
		 * Only re-read the ECC bytes, unless we are using the 2k/8b
		 * layout which is buggy in the sense that the ECC engine will
		 * try to correct data bytes anyway, creating bitflips. In this
		 * case, re-read the entire page.
		 */
		if (lt->writesize == 2048 && lt->strength == 8) {
			nand_change_read_column_op(chip, data_off_in_page,
						   buf + data_off, data_len,
						   false);
			nand_change_read_column_op(chip, spare_off_in_page,
						   chip->oob_poi + spare_off,
						   spare_len, false);
		}

		nand_change_read_column_op(chip, ecc_off_in_page,
					   chip->oob_poi + ecc_off, ecc_len,
					   false);

		/* Check the entire chunk (data + spare + ecc) for emptiness */
		marvell_nfc_check_empty_chunk(chip, buf + data_off, data_len,
					      chip->oob_poi + spare_off,
					      spare_len,
					      chip->oob_poi + ecc_off, ecc_len,
					      &max_bitflips);
	}

	return max_bitflips;
}

static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
{
	u8 *buf = nand_get_data_buf(chip);

	return chip->ecc.read_page_raw(chip, buf, true, page);
}

static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
{
	u8 *buf = nand_get_data_buf(chip);

	return chip->ecc.read_page(chip, buf, true, page);
}

/* BCH write helpers */
static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
						 const u8 *buf,
						 int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int spare_offset = 0;
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
		lt->last_spare_bytes;
	int chunk;

	marvell_nfc_select_target(chip, chip->cur_cs);

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Point to the column of the next chunk */
		nand_change_write_column_op(chip, chunk * full_chunk_size,
					    NULL, 0, false);

		/* Write the data */
		nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
				   data_len, false);

		if (!oob_required)
			continue;

		/* Write the spare bytes */
		if (spare_len)
			nand_write_data_op(chip, chip->oob_poi + spare_offset,
					   spare_len, false);

		/* Write the ECC bytes */
		if (ecc_len)
			nand_write_data_op(chip, chip->oob_poi + ecc_offset,
					   ecc_len, false);

		spare_offset += spare_len;
		ecc_offset += ALIGN(ecc_len, 32);
	}

	return nand_prog_page_end_op(chip);
}

static int
marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
				   const u8 *data, unsigned int data_len,
				   const u8 *spare, unsigned int spare_len,
				   int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	u32 xtype;
	int ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
		.ndcb[3] = data_len + spare_len,
	};

	/*
	 * First operation dispatches the CMD_SEQIN command, issues the address
	 * cycles and asks for the first chunk of data.
	 * All operations in the middle (if any) will issue a naked write and
	 * also ask for data.
	 * Last operation (if any) asks for the last chunk of data through a
	 * last naked write.
	 */
	if (chunk == 0) {
		if (lt->nchunks == 1)
			xtype = XTYPE_MONOLITHIC_RW;
		else
			xtype = XTYPE_WRITE_DISPATCH;

		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
				  NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
				  NDCB0_CMD1(NAND_CMD_SEQIN);
		nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
		nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
	} else if (chunk < lt->nchunks - 1) {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
	} else {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
	}

	/* Always dispatch the PAGEPROG command on the last chunk */
	if (chunk == lt->nchunks - 1)
		nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/* Transfer the contents */
	iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
	iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));

	return 0;
}

static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
					     const u8 *buf,
					     int oob_required, int page)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	const u8 *data = buf;
	const u8 *spare = chip->oob_poi;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int chunk, ret;

	marvell_nfc_select_target(chip, chip->cur_cs);

	/* Spare data will be written anyway, so clear it to avoid garbage */
	if (!oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	marvell_nfc_enable_hw_ecc(chip);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
		}

		marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
						   spare, spare_len, page);
		data += data_len;
		spare += spare_len;

		/*
		 * Waiting only for CMDD or PAGED is not enough, ECC bytes are
		 * partially written. No flag is set once the operation is
		 * really finished but the ND_RUN bit is cleared, so wait for it
		 * before stepping into the next command.
		 */
		marvell_nfc_wait_ndrun(chip);
	}

	ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));

	marvell_nfc_disable_hw_ecc(chip);

	if (ret)
		return ret;

	return 0;
}

static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
						int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *buf = nand_get_data_buf(chip);

	memset(buf, 0xFF, mtd->writesize);

	return chip->ecc.write_page_raw(chip, buf, true, page);
}

static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *buf = nand_get_data_buf(chip);

	memset(buf, 0xFF, mtd->writesize);

	return chip->ecc.write_page(chip, buf, true, page);
}

/* NAND framework ->exec_op() hooks and related helpers */
static void marvell_nfc_parse_instructions(struct nand_chip *chip,
					   const struct nand_subop *subop,
					   struct marvell_nfc_op *nfc_op)
{
	const struct nand_op_instr *instr = NULL;
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	bool first_cmd = true;
	unsigned int op_id;
	int i;

	/* Reset the input structure as most of its fields will be OR'ed */
	memset(nfc_op, 0, sizeof(struct marvell_nfc_op));

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int offset, naddrs;
		const u8 *addrs;
		int len;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd)
				nfc_op->ndcb[0] |=
					NDCB0_CMD1(instr->ctx.cmd.opcode);
			else
				nfc_op->ndcb[0] |=
					NDCB0_CMD2(instr->ctx.cmd.opcode) |
					NDCB0_DBC;

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);

			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				nfc_op->ndcb[1] |= addrs[i] << (8 * i);

			if (naddrs >= 5)
				nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
			if (naddrs >= 6)
				nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
			if (naddrs == 7)
				nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_DATA_IN_INSTR:
			nfc_op->data_instr = instr;
			nfc_op->data_instr_idx = op_id;
			nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
			if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				len = nand_subop_get_data_len(subop, op_id);
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			nfc_op->data_instr = instr;
			nfc_op->data_instr_idx = op_id;
			nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
			if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				len = nand_subop_get_data_len(subop, op_id);
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_WAITRDY_INSTR:
			nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			nfc_op->rdy_delay_ns = instr->delay_ns;
			break;
		}
	}
}
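/*
 * Illustration of the parsing above (example values, not from the
 * datasheet): a READID-like subop of CMD(0x90) + ADDR(1 cycle of 0x00) +
 * 8-byte DATA_IN yields NDCB0 = NDCB0_CMD1(0x90) | NDCB0_ADDR_CYC(1) |
 * NDCB0_CMD_TYPE(TYPE_READ) (plus XTYPE_MONOLITHIC_RW, LEN_OVRD and
 * NDCB3 = 8 on NFCv2) and NDCB1 = 0x00. The ->exec_op() helpers below
 * may then adjust the type field, e.g. marvell_nfc_read_id_type_exec()
 * replaces TYPE_READ with TYPE_READ_ID.
 */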
NDCB0_CMD_TYPE(TYPE_WRITE); 1762 if (nfc->caps->is_nfcv2) { 1763 nfc_op->ndcb[0] |= 1764 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | 1765 NDCB0_LEN_OVRD; 1766 len = nand_subop_get_data_len(subop, op_id); 1767 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); 1768 } 1769 nfc_op->data_delay_ns = instr->delay_ns; 1770 break; 1771 1772 case NAND_OP_WAITRDY_INSTR: 1773 nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; 1774 nfc_op->rdy_delay_ns = instr->delay_ns; 1775 break; 1776 } 1777 } 1778 } 1779 1780 static int marvell_nfc_xfer_data_pio(struct nand_chip *chip, 1781 const struct nand_subop *subop, 1782 struct marvell_nfc_op *nfc_op) 1783 { 1784 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 1785 const struct nand_op_instr *instr = nfc_op->data_instr; 1786 unsigned int op_id = nfc_op->data_instr_idx; 1787 unsigned int len = nand_subop_get_data_len(subop, op_id); 1788 unsigned int offset = nand_subop_get_data_start_off(subop, op_id); 1789 bool reading = (instr->type == NAND_OP_DATA_IN_INSTR); 1790 int ret; 1791 1792 if (instr->ctx.data.force_8bit) 1793 marvell_nfc_force_byte_access(chip, true); 1794 1795 if (reading) { 1796 u8 *in = instr->ctx.data.buf.in + offset; 1797 1798 ret = marvell_nfc_xfer_data_in_pio(nfc, in, len); 1799 } else { 1800 const u8 *out = instr->ctx.data.buf.out + offset; 1801 1802 ret = marvell_nfc_xfer_data_out_pio(nfc, out, len); 1803 } 1804 1805 if (instr->ctx.data.force_8bit) 1806 marvell_nfc_force_byte_access(chip, false); 1807 1808 return ret; 1809 } 1810 1811 static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip, 1812 const struct nand_subop *subop) 1813 { 1814 struct marvell_nfc_op nfc_op; 1815 bool reading; 1816 int ret; 1817 1818 marvell_nfc_parse_instructions(chip, subop, &nfc_op); 1819 reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR); 1820 1821 ret = marvell_nfc_prepare_cmd(chip); 1822 if (ret) 1823 return ret; 1824 1825 marvell_nfc_send_cmd(chip, &nfc_op); 1826 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ, 1827 "RDDREQ/WRDREQ while draining raw data"); 1828 if (ret) 1829 return ret; 1830 1831 cond_delay(nfc_op.cle_ale_delay_ns); 1832 1833 if (reading) { 1834 if (nfc_op.rdy_timeout_ms) { 1835 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); 1836 if (ret) 1837 return ret; 1838 } 1839 1840 cond_delay(nfc_op.rdy_delay_ns); 1841 } 1842 1843 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op); 1844 ret = marvell_nfc_wait_cmdd(chip); 1845 if (ret) 1846 return ret; 1847 1848 cond_delay(nfc_op.data_delay_ns); 1849 1850 if (!reading) { 1851 if (nfc_op.rdy_timeout_ms) { 1852 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); 1853 if (ret) 1854 return ret; 1855 } 1856 1857 cond_delay(nfc_op.rdy_delay_ns); 1858 } 1859 1860 /* 1861 * The NDCR ND_RUN bit should be cleared automatically at the end of 1862 * each operation, but experience shows that this behavior is buggy for 1863 * writes (with LEN_OVRD). Clear it by hand in this case. 1864 */ 1865 if (!reading) { 1866 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 1867 1868 writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN, 1869 nfc->regs + NDCR); 1870 } 1871 1872 return 0; 1873 } 1874 1875 static int marvell_nfc_naked_access_exec(struct nand_chip *chip, 1876 const struct nand_subop *subop) 1877 { 1878 struct marvell_nfc_op nfc_op; 1879 int ret; 1880 1881 marvell_nfc_parse_instructions(chip, subop, &nfc_op); 1882 1883 /* 1884 * Naked accesses are different in that they need to be flagged as naked 1885 * to the controller. Reset the controller register fields that describe 1886 * the command type and refill them according to the ongoing operation. 1887 */ 1888 nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) | 1889 NDCB0_CMD_XTYPE(XTYPE_MASK)); 1890 switch (subop->instrs[0].type) { 1891 case NAND_OP_CMD_INSTR: 1892 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD); 1893 break; 1894 case NAND_OP_ADDR_INSTR: 1895 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR); 1896 break; 1897 case NAND_OP_DATA_IN_INSTR: 1898 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) | 1899 NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); 1900 break; 1901 case NAND_OP_DATA_OUT_INSTR: 1902 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) | 1903 NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); 1904 break; 1905 default: 1906 /* This should never happen */ 1907 break; 1908 } 1909 1910 ret = marvell_nfc_prepare_cmd(chip); 1911 if (ret) 1912 return ret; 1913 1914 marvell_nfc_send_cmd(chip, &nfc_op); 1915 1916 if (!nfc_op.data_instr) { 1917 ret = marvell_nfc_wait_cmdd(chip); 1918 cond_delay(nfc_op.cle_ale_delay_ns); 1919 return ret; 1920 } 1921 1922 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ, 1923 "RDDREQ/WRDREQ while draining raw data"); 1924 if (ret) 1925 return ret; 1926 1927 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op); 1928 ret = marvell_nfc_wait_cmdd(chip); 1929 if (ret) 1930 return ret; 1931 1932 /* 1933 * The NDCR ND_RUN bit should be cleared automatically at the end of 1934 * each operation, but experience shows that this behavior is buggy for 1935 * writes (with LEN_OVRD). Clear it by hand in this case. 1936 */ 1937 if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) { 1938 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 1939 1940 writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN, 1941 nfc->regs + NDCR); 1942 } 1943 1944 return 0; 1945 } 1946 1947 static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip, 1948 const struct nand_subop *subop) 1949 { 1950 struct marvell_nfc_op nfc_op; 1951 int ret; 1952 1953 marvell_nfc_parse_instructions(chip, subop, &nfc_op); 1954 1955 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); 1956 cond_delay(nfc_op.rdy_delay_ns); 1957 1958 return ret; 1959 } 1960 1961 static int marvell_nfc_read_id_type_exec(struct nand_chip *chip, 1962 const struct nand_subop *subop) 1963 { 1964 struct marvell_nfc_op nfc_op; 1965 int ret; 1966 1967 marvell_nfc_parse_instructions(chip, subop, &nfc_op); 1968 nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ); 1969 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID); 1970 1971 ret = marvell_nfc_prepare_cmd(chip); 1972 if (ret) 1973 return ret; 1974 1975 marvell_nfc_send_cmd(chip, &nfc_op); 1976 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ, 1977 "RDDREQ while reading ID"); 1978 if (ret) 1979 return ret; 1980 1981 cond_delay(nfc_op.cle_ale_delay_ns); 1982 1983 if (nfc_op.rdy_timeout_ms) { 1984 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); 1985 if (ret) 1986 return ret; 1987 } 1988 1989 cond_delay(nfc_op.rdy_delay_ns); 1990 1991 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op); 1992 ret = marvell_nfc_wait_cmdd(chip); 1993 if (ret) 1994 return ret; 1995 1996 cond_delay(nfc_op.data_delay_ns); 1997 1998 return 0; 1999 } 2000 2001 static int marvell_nfc_read_status_exec(struct nand_chip *chip, 2002 const struct nand_subop *subop) 2003 { 2004 struct marvell_nfc_op nfc_op; 2005 int ret; 2006 2007 marvell_nfc_parse_instructions(chip, subop, &nfc_op); 2008 nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ); 2009 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS); 2010 2011 ret =
marvell_nfc_prepare_cmd(chip); 2012 if (ret) 2013 return ret; 2014 2015 marvell_nfc_send_cmd(chip, &nfc_op); 2016 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ, 2017 "RDDREQ while reading status"); 2018 if (ret) 2019 return ret; 2020 2021 cond_delay(nfc_op.cle_ale_delay_ns); 2022 2023 if (nfc_op.rdy_timeout_ms) { 2024 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); 2025 if (ret) 2026 return ret; 2027 } 2028 2029 cond_delay(nfc_op.rdy_delay_ns); 2030 2031 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op); 2032 ret = marvell_nfc_wait_cmdd(chip); 2033 if (ret) 2034 return ret; 2035 2036 cond_delay(nfc_op.data_delay_ns); 2037 2038 return 0; 2039 } 2040 2041 static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip, 2042 const struct nand_subop *subop) 2043 { 2044 struct marvell_nfc_op nfc_op; 2045 int ret; 2046 2047 marvell_nfc_parse_instructions(chip, subop, &nfc_op); 2048 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET); 2049 2050 ret = marvell_nfc_prepare_cmd(chip); 2051 if (ret) 2052 return ret; 2053 2054 marvell_nfc_send_cmd(chip, &nfc_op); 2055 ret = marvell_nfc_wait_cmdd(chip); 2056 if (ret) 2057 return ret; 2058 2059 cond_delay(nfc_op.cle_ale_delay_ns); 2060 2061 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); 2062 if (ret) 2063 return ret; 2064 2065 cond_delay(nfc_op.rdy_delay_ns); 2066 2067 return 0; 2068 } 2069 2070 static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip, 2071 const struct nand_subop *subop) 2072 { 2073 struct marvell_nfc_op nfc_op; 2074 int ret; 2075 2076 marvell_nfc_parse_instructions(chip, subop, &nfc_op); 2077 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE); 2078 2079 ret = marvell_nfc_prepare_cmd(chip); 2080 if (ret) 2081 return ret; 2082 2083 marvell_nfc_send_cmd(chip, &nfc_op); 2084 ret = marvell_nfc_wait_cmdd(chip); 2085 if (ret) 2086 return ret; 2087 2088 cond_delay(nfc_op.cle_ale_delay_ns); 2089 2090 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); 2091 if (ret) 2092 return ret; 2093 2094 cond_delay(nfc_op.rdy_delay_ns); 2095 2096 return 0; 2097 } 2098 2099 static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER( 2100 /* Monolithic reads/writes */ 2101 NAND_OP_PARSER_PATTERN( 2102 marvell_nfc_monolithic_access_exec, 2103 NAND_OP_PARSER_PAT_CMD_ELEM(false), 2104 NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2), 2105 NAND_OP_PARSER_PAT_CMD_ELEM(true), 2106 NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), 2107 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), 2108 NAND_OP_PARSER_PATTERN( 2109 marvell_nfc_monolithic_access_exec, 2110 NAND_OP_PARSER_PAT_CMD_ELEM(false), 2111 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2), 2112 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE), 2113 NAND_OP_PARSER_PAT_CMD_ELEM(true), 2114 NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), 2115 /* Naked commands */ 2116 NAND_OP_PARSER_PATTERN( 2117 marvell_nfc_naked_access_exec, 2118 NAND_OP_PARSER_PAT_CMD_ELEM(false)), 2119 NAND_OP_PARSER_PATTERN( 2120 marvell_nfc_naked_access_exec, 2121 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)), 2122 NAND_OP_PARSER_PATTERN( 2123 marvell_nfc_naked_access_exec, 2124 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), 2125 NAND_OP_PARSER_PATTERN( 2126 marvell_nfc_naked_access_exec, 2127 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)), 2128 NAND_OP_PARSER_PATTERN( 2129 marvell_nfc_naked_waitrdy_exec, 2130 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), 2131 ); 2132 2133 static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER( 2134 /* Naked commands are not supported; use a dedicated function for each pattern */ 2135 NAND_OP_PARSER_PATTERN( 2136 marvell_nfc_read_id_type_exec, 2137 NAND_OP_PARSER_PAT_CMD_ELEM(false), 2138 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1), 2139 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), 2140 NAND_OP_PARSER_PATTERN( 2141 marvell_nfc_erase_cmd_type_exec, 2142 NAND_OP_PARSER_PAT_CMD_ELEM(false), 2143 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1), 2144 NAND_OP_PARSER_PAT_CMD_ELEM(false), 2145 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), 2146 NAND_OP_PARSER_PATTERN( 2147 marvell_nfc_read_status_exec, 2148 NAND_OP_PARSER_PAT_CMD_ELEM(false), 2149 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), 2150 NAND_OP_PARSER_PATTERN( 2151 marvell_nfc_reset_cmd_type_exec, 2152 NAND_OP_PARSER_PAT_CMD_ELEM(false), 2153 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), 2154 NAND_OP_PARSER_PATTERN( 2155 marvell_nfc_naked_waitrdy_exec, 2156 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), 2157 ); 2158 2159 static int marvell_nfc_exec_op(struct nand_chip *chip, 2160 const struct nand_operation *op, 2161 bool check_only) 2162 { 2163 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 2164 2165 if (!check_only) 2166 marvell_nfc_select_target(chip, op->cs); 2167 2168 if (nfc->caps->is_nfcv2) 2169 return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser, 2170 op, check_only); 2171 else 2172 return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser, 2173 op, check_only); 2174 } 2175 2176 /* 2177 * Layouts were broken in the old pxa3xx_nand driver; these ones are 2178 * supposed to be usable. 2179 */ 2180 static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section, 2181 struct mtd_oob_region *oobregion) 2182 { 2183 struct nand_chip *chip = mtd_to_nand(mtd); 2184 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; 2185 2186 if (section) 2187 return -ERANGE; 2188 2189 oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) + 2190 lt->last_ecc_bytes; 2191 oobregion->offset = mtd->oobsize - oobregion->length; 2192 2193 return 0; 2194 } 2195 2196 static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section, 2197 struct mtd_oob_region *oobregion) 2198 { 2199 struct nand_chip *chip = mtd_to_nand(mtd); 2200 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; 2201 2202 if (section) 2203 return -ERANGE; 2204 2205 /* 2206 * The bootrom looks at bytes 0 & 5 for bad block markers with the 2207 * 4KB page / 4-bit BCH combination.
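 * The free OOB region below therefore starts at offset 6 for that
 * geometry (and at offset 2 otherwise), so that neither marker byte is
 * exposed as free OOB space.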
2208 */ 2209 if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K) 2210 oobregion->offset = 6; 2211 else 2212 oobregion->offset = 2; 2213 2214 oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) + 2215 lt->last_spare_bytes - oobregion->offset; 2216 2217 return 0; 2218 } 2219 2220 static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = { 2221 .ecc = marvell_nand_ooblayout_ecc, 2222 .free = marvell_nand_ooblayout_free, 2223 }; 2224 2225 static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd, 2226 struct nand_ecc_ctrl *ecc) 2227 { 2228 struct nand_chip *chip = mtd_to_nand(mtd); 2229 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 2230 const struct marvell_hw_ecc_layout *l; 2231 int i; 2232 2233 if (!nfc->caps->is_nfcv2 && 2234 (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) { 2235 dev_err(nfc->dev, 2236 "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n", 2237 mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize); 2238 return -ENOTSUPP; 2239 } 2240 2241 to_marvell_nand(chip)->layout = NULL; 2242 for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) { 2243 l = &marvell_nfc_layouts[i]; 2244 if (mtd->writesize == l->writesize && 2245 ecc->size == l->chunk && ecc->strength == l->strength) { 2246 to_marvell_nand(chip)->layout = l; 2247 break; 2248 } 2249 } 2250 2251 if (!to_marvell_nand(chip)->layout || 2252 (!nfc->caps->is_nfcv2 && ecc->strength > 1)) { 2253 dev_err(nfc->dev, 2254 "ECC strength %d at page size %d is not supported\n", 2255 ecc->strength, mtd->writesize); 2256 return -ENOTSUPP; 2257 } 2258 2259 /* Special care for the layout 2k/8-bit/512B */ 2260 if (l->writesize == 2048 && l->strength == 8) { 2261 if (mtd->oobsize < 128) { 2262 dev_err(nfc->dev, "Requested layout needs at least 128 OOB bytes\n"); 2263 return -ENOTSUPP; 2264 } else { 2265 chip->bbt_options |= NAND_BBT_NO_OOB_BBM; 2266 } 2267 } 2268 2269 mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops); 2270 ecc->steps = l->nchunks; 2271 ecc->size = l->data_bytes; 2272 2273 if (ecc->strength == 1) { 2274 chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 2275 ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw; 2276 ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page; 2277 ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw; 2278 ecc->read_oob = ecc->read_oob_raw; 2279 ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw; 2280 ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page; 2281 ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw; 2282 ecc->write_oob = ecc->write_oob_raw; 2283 } else { 2284 chip->ecc.algo = NAND_ECC_ALGO_BCH; 2285 ecc->strength = 16; 2286 ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw; 2287 ecc->read_page = marvell_nfc_hw_ecc_bch_read_page; 2288 ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw; 2289 ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob; 2290 ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw; 2291 ecc->write_page = marvell_nfc_hw_ecc_bch_write_page; 2292 ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw; 2293 ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob; 2294 } 2295 2296 return 0; 2297 } 2298 2299 static int marvell_nand_ecc_init(struct mtd_info *mtd, 2300 struct nand_ecc_ctrl *ecc) 2301 { 2302 struct nand_chip *chip = mtd_to_nand(mtd); 2303 const struct nand_ecc_props *requirements = 2304 nanddev_get_ecc_requirements(&chip->base); 2305 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 2306 int ret; 2307 2308 if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE && 2309 (!ecc->size || 
!ecc->strength)) { 2310 if (requirements->step_size && requirements->strength) { 2311 ecc->size = requirements->step_size; 2312 ecc->strength = requirements->strength; 2313 } else { 2314 dev_info(nfc->dev, 2315 "No minimum ECC strength, using 1b/512B\n"); 2316 ecc->size = 512; 2317 ecc->strength = 1; 2318 } 2319 } 2320 2321 switch (ecc->engine_type) { 2322 case NAND_ECC_ENGINE_TYPE_ON_HOST: 2323 ret = marvell_nand_hw_ecc_controller_init(mtd, ecc); 2324 if (ret) 2325 return ret; 2326 break; 2327 case NAND_ECC_ENGINE_TYPE_NONE: 2328 case NAND_ECC_ENGINE_TYPE_SOFT: 2329 case NAND_ECC_ENGINE_TYPE_ON_DIE: 2330 if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 && 2331 mtd->writesize != SZ_2K) { 2332 dev_err(nfc->dev, "NFCv1 cannot write %d byte pages\n", 2333 mtd->writesize); 2334 return -EINVAL; 2335 } 2336 break; 2337 default: 2338 return -EINVAL; 2339 } 2340 2341 return 0; 2342 } 2343 2344 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' }; 2345 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' }; 2346 2347 static struct nand_bbt_descr bbt_main_descr = { 2348 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | 2349 NAND_BBT_2BIT | NAND_BBT_VERSION, 2350 .offs = 8, 2351 .len = 6, 2352 .veroffs = 14, 2353 .maxblocks = 8, /* Last 8 blocks in each chip */ 2354 .pattern = bbt_pattern 2355 }; 2356 2357 static struct nand_bbt_descr bbt_mirror_descr = { 2358 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | 2359 NAND_BBT_2BIT | NAND_BBT_VERSION, 2360 .offs = 8, 2361 .len = 6, 2362 .veroffs = 14, 2363 .maxblocks = 8, /* Last 8 blocks in each chip */ 2364 .pattern = bbt_mirror_pattern 2365 }; 2366 2367 static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, 2368 const struct nand_interface_config *conf) 2369 { 2370 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); 2371 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 2372 unsigned int period_ns = 1000000000 / clk_get_rate(nfc->core_clk) * 2; 2373 const struct nand_sdr_timings *sdr; 2374 struct marvell_nfc_timings nfc_tmg; 2375 int read_delay; 2376 2377 sdr = nand_get_sdr_timings(conf); 2378 if (IS_ERR(sdr)) 2379 return PTR_ERR(sdr); 2380 2381 if (nfc->caps->max_mode_number && nfc->caps->max_mode_number < conf->timings.mode) 2382 return -EOPNOTSUPP; 2383 2384 /* 2385 * SDR timings are given in pico-seconds while NFC timings must be 2386 * expressed in NAND controller clock cycles; that clock runs at half 2387 * the frequency of the ECC clock retrieved by clk_get_rate(). 2388 * This is not written anywhere in the datasheet but was observed with 2389 * an oscilloscope (e.g. a 250 MHz core clock gives period_ns = 8). 2390 * 2391 * The NFC datasheet gives the equations from which those calculations 2392 * are derived; they tend to be slightly more restrictive than the 2393 * given core timings and may improve the overall speed. 2394 */ 2395 nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1; 2396 nfc_tmg.tRH = nfc_tmg.tRP; 2397 nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1; 2398 nfc_tmg.tWH = nfc_tmg.tWP; 2399 nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns); 2400 nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1; 2401 nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns); 2402 /* 2403 * Read delay is the time of propagation from SoC pins to NFC internal 2404 * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles.
In 2405 * EDO mode, an additional delay of tRH must be taken into account so 2406 * the data is sampled on the falling edge instead of the rising edge. 2407 */ 2408 read_delay = sdr->tRC_min >= 30000 ? 2409 MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH; 2410 2411 nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns); 2412 /* 2413 * tWHR and tRHW are supposed to be read-to-write delays (and vice 2414 * versa) but in some cases, i.e. when doing a change column, they must 2415 * be greater than that to make sure the tCCS delay is respected. 2416 */ 2417 nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min), 2418 period_ns) - 2; 2419 nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min), 2420 period_ns); 2421 2422 /* 2423 * NFCv2: Use WAIT_MODE (wait for RB line), do not rely only on delays. 2424 * NFCv1: No WAIT_MODE, tR must be maximal. 2425 */ 2426 if (nfc->caps->is_nfcv2) { 2427 nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns); 2428 } else { 2429 nfc_tmg.tR = TO_CYCLES64(sdr->tWB_max + sdr->tR_max, 2430 period_ns); 2431 if (nfc_tmg.tR + 3 > nfc_tmg.tCH) 2432 nfc_tmg.tR = nfc_tmg.tCH - 3; 2433 else 2434 nfc_tmg.tR = 0; 2435 } 2436 2437 if (chipnr < 0) 2438 return 0; 2439 2440 marvell_nand->ndtr0 = 2441 NDTR0_TRP(nfc_tmg.tRP) | 2442 NDTR0_TRH(nfc_tmg.tRH) | 2443 NDTR0_ETRP(nfc_tmg.tRP) | 2444 NDTR0_TWP(nfc_tmg.tWP) | 2445 NDTR0_TWH(nfc_tmg.tWH) | 2446 NDTR0_TCS(nfc_tmg.tCS) | 2447 NDTR0_TCH(nfc_tmg.tCH); 2448 2449 marvell_nand->ndtr1 = 2450 NDTR1_TAR(nfc_tmg.tAR) | 2451 NDTR1_TWHR(nfc_tmg.tWHR) | 2452 NDTR1_TR(nfc_tmg.tR); 2453 2454 if (nfc->caps->is_nfcv2) { 2455 marvell_nand->ndtr0 |= 2456 NDTR0_RD_CNT_DEL(read_delay) | 2457 NDTR0_SELCNTR | 2458 NDTR0_TADL(nfc_tmg.tADL); 2459 2460 marvell_nand->ndtr1 |= 2461 NDTR1_TRHW(nfc_tmg.tRHW) | 2462 NDTR1_WAIT_MODE; 2463 } 2464 2465 /* 2466 * Reset nfc->selected_chip so the next command will cause the timing 2467 * registers to be updated in marvell_nfc_select_target(). 2468 */ 2469 nfc->selected_chip = NULL; 2470 2471 return 0; 2472 } 2473 2474 static int marvell_nand_attach_chip(struct nand_chip *chip) 2475 { 2476 struct mtd_info *mtd = nand_to_mtd(chip); 2477 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); 2478 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 2479 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev); 2480 int ret; 2481 2482 if (pdata && pdata->flash_bbt) 2483 chip->bbt_options |= NAND_BBT_USE_FLASH; 2484 2485 if (chip->bbt_options & NAND_BBT_USE_FLASH) { 2486 /* 2487 * We'll use a bad block table stored in-flash and don't 2488 * allow writing the bad block marker to the flash. 2489 */ 2490 chip->bbt_options |= NAND_BBT_NO_OOB_BBM; 2491 chip->bbt_td = &bbt_main_descr; 2492 chip->bbt_md = &bbt_mirror_descr; 2493 } 2494 2495 /* Save the chip-specific fields of NDCR */ 2496 marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize); 2497 if (chip->options & NAND_BUSWIDTH_16) 2498 marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C; 2499 2500 /* 2501 * On small page NANDs, only one cycle is needed to pass the 2502 * column address. 2503 */ 2504 if (mtd->writesize <= 512) { 2505 marvell_nand->addr_cyc = 1; 2506 } else { 2507 marvell_nand->addr_cyc = 2; 2508 marvell_nand->ndcr |= NDCR_RA_START; 2509 } 2510 2511 /* 2512 * Now add the number of cycles needed to pass the row 2513 * address.
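 * For instance, a large-page chip needing three row cycles ends up
 * with addr_cyc = 2 + 3 = 5, while a small-page chip with two row
 * cycles uses addr_cyc = 1 + 2 = 3.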
2514 * 2515 * Addressing a chip using CS 2 or 3 should also need a third row 2516 * cycle, but due to inconsistencies in the documentation and lack of 2517 * hardware to test this situation, this case is not supported. 2518 */ 2519 if (chip->options & NAND_ROW_ADDR_3) 2520 marvell_nand->addr_cyc += 3; 2521 else 2522 marvell_nand->addr_cyc += 2; 2523 2524 if (pdata) { 2525 chip->ecc.size = pdata->ecc_step_size; 2526 chip->ecc.strength = pdata->ecc_strength; 2527 } 2528 2529 ret = marvell_nand_ecc_init(mtd, &chip->ecc); 2530 if (ret) { 2531 dev_err(nfc->dev, "ECC init failed: %d\n", ret); 2532 return ret; 2533 } 2534 2535 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) { 2536 /* 2537 * Subpage write is not available with hardware ECC; also 2538 * prohibit subpage read, as subpage access would otherwise 2539 * still be allowed from userspace and subpage writes, if used, 2540 * would lead to numerous uncorrectable ECC errors. 2541 */ 2542 chip->options |= NAND_NO_SUBPAGE_WRITE; 2543 } 2544 2545 if (pdata || nfc->caps->legacy_of_bindings) { 2546 /* 2547 * We keep the MTD name unchanged to avoid breaking platforms 2548 * where the MTD cmdline parser is used and the bootloader 2549 * has not been updated to use the new naming scheme. 2550 */ 2551 mtd->name = "pxa3xx_nand-0"; 2552 } else if (!mtd->name) { 2553 /* 2554 * If the new bindings are used and the bootloader has not been 2555 * updated to pass a new mtdparts parameter on the cmdline, you 2556 * should define the following property in your NAND node, e.g.: 2557 * 2558 * label = "main-storage"; 2559 * 2560 * This way, mtd->name will be set by the core when 2561 * nand_set_flash_node() is called. 2562 */ 2563 mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, 2564 "%s:nand.%d", dev_name(nfc->dev), 2565 marvell_nand->sels[0].cs); 2566 if (!mtd->name) { 2567 dev_err(nfc->dev, "Failed to allocate mtd->name\n"); 2568 return -ENOMEM; 2569 } 2570 } 2571 2572 return 0; 2573 } 2574 2575 static const struct nand_controller_ops marvell_nand_controller_ops = { 2576 .attach_chip = marvell_nand_attach_chip, 2577 .exec_op = marvell_nfc_exec_op, 2578 .setup_interface = marvell_nfc_setup_interface, 2579 }; 2580 2581 static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, 2582 struct device_node *np) 2583 { 2584 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev); 2585 struct marvell_nand_chip *marvell_nand; 2586 struct mtd_info *mtd; 2587 struct nand_chip *chip; 2588 int nsels, ret, i; 2589 u32 cs, rb; 2590 2591 /* 2592 * The legacy "num-cs" property indicates the number of CS on the only 2593 * chip connected to the controller (legacy bindings do not support 2594 * more than one chip). The CS and RB pins are always #0. 2595 * 2596 * When not using legacy bindings, the "reg" and "nand-rb" properties 2597 * must both be filled in. For each chip, expressed as a subnode, 2598 * "reg" points to the CS lines and "nand-rb" to the RB line.
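 *
 * As a minimal, hypothetical sketch (the CS/RB numbers are only an
 * example), such a subnode could look like:
 *
 *	nand@0 {
 *		reg = <0>;
 *		nand-rb = <0>;
 *	};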
2599 */ 2600 if (pdata || nfc->caps->legacy_of_bindings) { 2601 nsels = 1; 2602 } else { 2603 nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32)); 2604 if (nsels <= 0) { 2605 dev_err(dev, "missing/invalid reg property\n"); 2606 return -EINVAL; 2607 } 2608 } 2609 2610 /* Allocate the nand chip structure */ 2611 marvell_nand = devm_kzalloc(dev, 2612 struct_size(marvell_nand, sels, nsels), 2613 GFP_KERNEL); 2614 if (!marvell_nand) { 2615 dev_err(dev, "could not allocate chip structure\n"); 2616 return -ENOMEM; 2617 } 2618 2619 marvell_nand->nsels = nsels; 2620 marvell_nand->selected_die = -1; 2621 2622 for (i = 0; i < nsels; i++) { 2623 if (pdata || nfc->caps->legacy_of_bindings) { 2624 /* 2625 * Legacy bindings use the CS lines in natural 2626 * order (0, 1, ...) 2627 */ 2628 cs = i; 2629 } else { 2630 /* Retrieve CS id */ 2631 ret = of_property_read_u32_index(np, "reg", i, &cs); 2632 if (ret) { 2633 dev_err(dev, "could not retrieve reg property: %d\n", 2634 ret); 2635 return ret; 2636 } 2637 } 2638 2639 if (cs >= nfc->caps->max_cs_nb) { 2640 dev_err(dev, "invalid reg value: %u (max CS = %d)\n", 2641 cs, nfc->caps->max_cs_nb); 2642 return -EINVAL; 2643 } 2644 2645 if (test_and_set_bit(cs, &nfc->assigned_cs)) { 2646 dev_err(dev, "CS %d already assigned\n", cs); 2647 return -EINVAL; 2648 } 2649 2650 /* 2651 * The cs variable represents the chip select id, which must be 2652 * converted into bit fields for NDCB0 and NDCB2 to select the 2653 * right chip. Unfortunately, due to a lack of information on 2654 * the subject and inconsistent documentation, the user should 2655 * not use CS1 and CS3 at all, as asserting them is not supported 2656 * in a reliable way (due to multiplexing inside the ADDR5 field). 2657 */ 2658 marvell_nand->sels[i].cs = cs; 2659 switch (cs) { 2660 case 0: 2661 case 2: 2662 marvell_nand->sels[i].ndcb0_csel = 0; 2663 break; 2664 case 1: 2665 case 3: 2666 marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL; 2667 break; 2668 default: 2669 return -EINVAL; 2670 } 2671 2672 /* Retrieve RB id */ 2673 if (pdata || nfc->caps->legacy_of_bindings) { 2674 /* Legacy bindings always use RB #0 */ 2675 rb = 0; 2676 } else { 2677 ret = of_property_read_u32_index(np, "nand-rb", i, 2678 &rb); 2679 if (ret) { 2680 dev_err(dev, 2681 "could not retrieve RB property: %d\n", 2682 ret); 2683 return ret; 2684 } 2685 } 2686 2687 if (rb >= nfc->caps->max_rb_nb) { 2688 dev_err(dev, "invalid nand-rb value: %u (max RB = %d)\n", 2689 rb, nfc->caps->max_rb_nb); 2690 return -EINVAL; 2691 } 2692 2693 marvell_nand->sels[i].rb = rb; 2694 } 2695 2696 chip = &marvell_nand->chip; 2697 chip->controller = &nfc->controller; 2698 nand_set_flash_node(chip, np); 2699 2700 if (of_property_read_bool(np, "marvell,nand-keep-config")) 2701 chip->options |= NAND_KEEP_TIMINGS; 2702 2703 mtd = nand_to_mtd(chip); 2704 mtd->dev.parent = dev; 2705 2706 /* 2707 * Save a reference value for timing registers before 2708 * ->setup_interface() is called.
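 * These reset values are kept as-is for chips flagged with
 * NAND_KEEP_TIMINGS ("marvell,nand-keep-config" above), for which the
 * NAND core skips ->setup_interface() entirely.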
2709 */ 2710 marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0); 2711 marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1); 2712 2713 chip->options |= NAND_BUSWIDTH_AUTO; 2714 2715 ret = nand_scan(chip, marvell_nand->nsels); 2716 if (ret) { 2717 dev_err(dev, "could not scan the nand chip\n"); 2718 return ret; 2719 } 2720 2721 if (pdata) 2722 /* Legacy bindings support only one chip */ 2723 ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts); 2724 else 2725 ret = mtd_device_register(mtd, NULL, 0); 2726 if (ret) { 2727 dev_err(dev, "failed to register mtd device: %d\n", ret); 2728 nand_cleanup(chip); 2729 return ret; 2730 } 2731 2732 list_add_tail(&marvell_nand->node, &nfc->chips); 2733 2734 return 0; 2735 } 2736 2737 static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) 2738 { 2739 struct marvell_nand_chip *entry, *temp; 2740 struct nand_chip *chip; 2741 int ret; 2742 2743 list_for_each_entry_safe(entry, temp, &nfc->chips, node) { 2744 chip = &entry->chip; 2745 ret = mtd_device_unregister(nand_to_mtd(chip)); 2746 WARN_ON(ret); 2747 nand_cleanup(chip); 2748 list_del(&entry->node); 2749 } 2750 } 2751 2752 static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) 2753 { 2754 struct device_node *np = dev->of_node; 2755 struct device_node *nand_np; 2756 int max_cs = nfc->caps->max_cs_nb; 2757 int nchips; 2758 int ret; 2759 2760 if (!np) 2761 nchips = 1; 2762 else 2763 nchips = of_get_child_count(np); 2764 2765 if (nchips > max_cs) { 2766 dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips, 2767 max_cs); 2768 return -EINVAL; 2769 } 2770 2771 /* 2772 * Legacy bindings do not use child nodes to exhibit NAND chip 2773 * properties and layout. Instead, NAND properties are mixed with the 2774 * controller ones, and partitions are defined as direct subnodes of the 2775 * NAND controller node. 
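 * In that case, the controller node itself is handed to
 * marvell_nand_chip_init() below as if it were the chip node.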
2776 */ 2777 if (nfc->caps->legacy_of_bindings) { 2778 ret = marvell_nand_chip_init(dev, nfc, np); 2779 return ret; 2780 } 2781 2782 for_each_child_of_node(np, nand_np) { 2783 ret = marvell_nand_chip_init(dev, nfc, nand_np); 2784 if (ret) { 2785 of_node_put(nand_np); 2786 goto cleanup_chips; 2787 } 2788 } 2789 2790 return 0; 2791 2792 cleanup_chips: 2793 marvell_nand_chips_cleanup(nfc); 2794 2795 return ret; 2796 } 2797 2798 static int marvell_nfc_init_dma(struct marvell_nfc *nfc) 2799 { 2800 struct platform_device *pdev = container_of(nfc->dev, 2801 struct platform_device, 2802 dev); 2803 struct dma_slave_config config = {}; 2804 struct resource *r; 2805 int ret; 2806 2807 if (!IS_ENABLED(CONFIG_PXA_DMA)) { 2808 dev_warn(nfc->dev, 2809 "DMA not enabled in configuration\n"); 2810 return -ENOTSUPP; 2811 } 2812 2813 ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32)); 2814 if (ret) 2815 return ret; 2816 2817 nfc->dma_chan = dma_request_chan(nfc->dev, "data"); 2818 if (IS_ERR(nfc->dma_chan)) { 2819 ret = PTR_ERR(nfc->dma_chan); 2820 nfc->dma_chan = NULL; 2821 return dev_err_probe(nfc->dev, ret, "DMA channel request failed\n"); 2822 } 2823 2824 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2825 if (!r) { 2826 ret = -ENXIO; 2827 goto release_channel; 2828 } 2829 2830 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 2831 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 2832 config.src_addr = r->start + NDDB; 2833 config.dst_addr = r->start + NDDB; 2834 config.src_maxburst = 32; 2835 config.dst_maxburst = 32; 2836 ret = dmaengine_slave_config(nfc->dma_chan, &config); 2837 if (ret < 0) { 2838 dev_err(nfc->dev, "Failed to configure DMA channel\n"); 2839 goto release_channel; 2840 } 2841 2842 /* 2843 * DMA transfers must have a length that is a multiple of 32, which 2844 * may be bigger than the destination buffer. Use this bounce buffer 2845 * for the DMA transfer instead and then copy the desired amount of 2846 * data to the provided buffer. 2847 */ 2848 nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA); 2849 if (!nfc->dma_buf) { 2850 ret = -ENOMEM; 2851 goto release_channel; 2852 } 2853 2854 nfc->use_dma = true; 2855 2856 return 0; 2857 2858 release_channel: 2859 dma_release_channel(nfc->dma_chan); 2860 nfc->dma_chan = NULL; 2861 2862 return ret; 2863 } 2864 2865 static void marvell_nfc_reset(struct marvell_nfc *nfc) 2866 { 2867 /* 2868 * ECC operations and interrupts are only enabled when specifically 2869 * needed. ECC shall not be activated in the early stages (probe fails). 2870 * The arbiter flag, even if marked as "reserved", must be set (empirical). 2871 * The SPARE_EN bit must always be set or the ECC bytes will not be at 2872 * the same offset in the read page, which would defeat the protection. 2873 */ 2874 writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN | 2875 NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR); 2876 writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR); 2877 writel_relaxed(0, nfc->regs + NDECCCTRL); 2878 } 2879 2880 static int marvell_nfc_init(struct marvell_nfc *nfc) 2881 { 2882 struct device_node *np = nfc->dev->of_node; 2883 2884 /* 2885 * Some SoCs like A7k/A8k need to manually enable the NAND 2886 * controller, gated clocks and reset bits to avoid depending on the 2887 * bootloader. This is done through the use of the System Functions 2888 * registers.
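 * The GENCONF_* accesses below set the NFC enable, ECC clock/core
 * reset, NFC interrupt enable and device bus arbitration bits in
 * GENCONF_SOC_DEVICE_MUX, then set the ND gate bit in
 * GENCONF_CLK_GATING_CTRL.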
2889 */ 2890 if (nfc->caps->need_system_controller) { 2891 struct regmap *sysctrl_base = 2892 syscon_regmap_lookup_by_phandle(np, 2893 "marvell,system-controller"); 2894 2895 if (IS_ERR(sysctrl_base)) 2896 return PTR_ERR(sysctrl_base); 2897 2898 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, 2899 GENCONF_SOC_DEVICE_MUX_NFC_EN | 2900 GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST | 2901 GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST | 2902 GENCONF_SOC_DEVICE_MUX_NFC_INT_EN | 2903 GENCONF_SOC_DEVICE_MUX_NFC_DEVBUS_ARB_EN); 2904 2905 regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL, 2906 GENCONF_CLK_GATING_CTRL_ND_GATE, 2907 GENCONF_CLK_GATING_CTRL_ND_GATE); 2908 } 2909 2910 /* Configure the DMA if appropriate */ 2911 if (!nfc->caps->is_nfcv2) 2912 marvell_nfc_init_dma(nfc); 2913 2914 marvell_nfc_reset(nfc); 2915 2916 return 0; 2917 } 2918 2919 static int marvell_nfc_probe(struct platform_device *pdev) 2920 { 2921 struct device *dev = &pdev->dev; 2922 struct marvell_nfc *nfc; 2923 int ret; 2924 int irq; 2925 2926 nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc), 2927 GFP_KERNEL); 2928 if (!nfc) 2929 return -ENOMEM; 2930 2931 nfc->dev = dev; 2932 nand_controller_init(&nfc->controller); 2933 nfc->controller.ops = &marvell_nand_controller_ops; 2934 INIT_LIST_HEAD(&nfc->chips); 2935 2936 nfc->regs = devm_platform_ioremap_resource(pdev, 0); 2937 if (IS_ERR(nfc->regs)) 2938 return PTR_ERR(nfc->regs); 2939 2940 irq = platform_get_irq(pdev, 0); 2941 if (irq < 0) 2942 return irq; 2943 2944 nfc->core_clk = devm_clk_get(&pdev->dev, "core"); 2945 2946 /* Manage the legacy case (when the first clock was not named) */ 2947 if (nfc->core_clk == ERR_PTR(-ENOENT)) 2948 nfc->core_clk = devm_clk_get(&pdev->dev, NULL); 2949 2950 if (IS_ERR(nfc->core_clk)) 2951 return PTR_ERR(nfc->core_clk); 2952 2953 ret = clk_prepare_enable(nfc->core_clk); 2954 if (ret) 2955 return ret; 2956 2957 nfc->reg_clk = devm_clk_get(&pdev->dev, "reg"); 2958 if (IS_ERR(nfc->reg_clk)) { 2959 if (PTR_ERR(nfc->reg_clk) != -ENOENT) { 2960 ret = PTR_ERR(nfc->reg_clk); 2961 goto unprepare_core_clk; 2962 } 2963 2964 nfc->reg_clk = NULL; 2965 } 2966 2967 ret = clk_prepare_enable(nfc->reg_clk); 2968 if (ret) 2969 goto unprepare_core_clk; 2970 2971 marvell_nfc_disable_int(nfc, NDCR_ALL_INT); 2972 marvell_nfc_clear_int(nfc, NDCR_ALL_INT); 2973 ret = devm_request_irq(dev, irq, marvell_nfc_isr, 2974 0, "marvell-nfc", nfc); 2975 if (ret) 2976 goto unprepare_reg_clk; 2977 2978 /* Get NAND controller capabilities */ 2979 if (pdev->id_entry) 2980 nfc->caps = (void *)pdev->id_entry->driver_data; 2981 else 2982 nfc->caps = of_device_get_match_data(&pdev->dev); 2983 2984 if (!nfc->caps) { 2985 dev_err(dev, "Could not retrieve NFC caps\n"); 2986 ret = -EINVAL; 2987 goto unprepare_reg_clk; 2988 } 2989 2990 /* Init the controller and then probe the chips */ 2991 ret = marvell_nfc_init(nfc); 2992 if (ret) 2993 goto unprepare_reg_clk; 2994 2995 platform_set_drvdata(pdev, nfc); 2996 2997 ret = marvell_nand_chips_init(dev, nfc); 2998 if (ret) 2999 goto release_dma; 3000 3001 return 0; 3002 3003 release_dma: 3004 if (nfc->use_dma) 3005 dma_release_channel(nfc->dma_chan); 3006 unprepare_reg_clk: 3007 clk_disable_unprepare(nfc->reg_clk); 3008 unprepare_core_clk: 3009 clk_disable_unprepare(nfc->core_clk); 3010 3011 return ret; 3012 } 3013 3014 static void marvell_nfc_remove(struct platform_device *pdev) 3015 { 3016 struct marvell_nfc *nfc = platform_get_drvdata(pdev); 3017 3018 marvell_nand_chips_cleanup(nfc); 3019 3020 if (nfc->use_dma) { 3021
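/* Stop any in-flight DMA transfer before releasing the channel */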
dmaengine_terminate_all(nfc->dma_chan); 3022 dma_release_channel(nfc->dma_chan); 3023 } 3024 3025 clk_disable_unprepare(nfc->reg_clk); 3026 clk_disable_unprepare(nfc->core_clk); 3027 } 3028 3029 static int __maybe_unused marvell_nfc_suspend(struct device *dev) 3030 { 3031 struct marvell_nfc *nfc = dev_get_drvdata(dev); 3032 struct marvell_nand_chip *chip; 3033 3034 list_for_each_entry(chip, &nfc->chips, node) 3035 marvell_nfc_wait_ndrun(&chip->chip); 3036 3037 clk_disable_unprepare(nfc->reg_clk); 3038 clk_disable_unprepare(nfc->core_clk); 3039 3040 return 0; 3041 } 3042 3043 static int __maybe_unused marvell_nfc_resume(struct device *dev) 3044 { 3045 struct marvell_nfc *nfc = dev_get_drvdata(dev); 3046 int ret; 3047 3048 ret = clk_prepare_enable(nfc->core_clk); 3049 if (ret < 0) 3050 return ret; 3051 3052 ret = clk_prepare_enable(nfc->reg_clk); 3053 if (ret < 0) { 3054 clk_disable_unprepare(nfc->core_clk); 3055 return ret; 3056 } 3057 3058 /* 3059 * Reset nfc->selected_chip so the next command will cause the timing 3060 * registers to be restored in marvell_nfc_select_target(). 3061 */ 3062 nfc->selected_chip = NULL; 3063 3064 /* Reset registers that have lost their contents */ 3065 marvell_nfc_reset(nfc); 3066 3067 return 0; 3068 } 3069 3070 static const struct dev_pm_ops marvell_nfc_pm_ops = { 3071 SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume) 3072 }; 3073 3074 static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = { 3075 .max_cs_nb = 4, 3076 .max_rb_nb = 2, 3077 .need_system_controller = true, 3078 .is_nfcv2 = true, 3079 }; 3080 3081 static const struct marvell_nfc_caps marvell_ac5_caps = { 3082 .max_cs_nb = 2, 3083 .max_rb_nb = 1, 3084 .is_nfcv2 = true, 3085 .max_mode_number = 3, 3086 }; 3087 3088 static const struct marvell_nfc_caps marvell_armada370_nfc_caps = { 3089 .max_cs_nb = 4, 3090 .max_rb_nb = 2, 3091 .is_nfcv2 = true, 3092 }; 3093 3094 static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = { 3095 .max_cs_nb = 2, 3096 .max_rb_nb = 1, 3097 .use_dma = true, 3098 }; 3099 3100 static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = { 3101 .max_cs_nb = 4, 3102 .max_rb_nb = 2, 3103 .need_system_controller = true, 3104 .legacy_of_bindings = true, 3105 .is_nfcv2 = true, 3106 }; 3107 3108 static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = { 3109 .max_cs_nb = 4, 3110 .max_rb_nb = 2, 3111 .legacy_of_bindings = true, 3112 .is_nfcv2 = true, 3113 }; 3114 3115 static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = { 3116 .max_cs_nb = 2, 3117 .max_rb_nb = 1, 3118 .legacy_of_bindings = true, 3119 .use_dma = true, 3120 }; 3121 3122 static const struct platform_device_id marvell_nfc_platform_ids[] = { 3123 { 3124 .name = "pxa3xx-nand", 3125 .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps, 3126 }, 3127 { /* sentinel */ }, 3128 }; 3129 MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids); 3130 3131 static const struct of_device_id marvell_nfc_of_ids[] = { 3132 { 3133 .compatible = "marvell,armada-8k-nand-controller", 3134 .data = &marvell_armada_8k_nfc_caps, 3135 }, 3136 { 3137 .compatible = "marvell,ac5-nand-controller", 3138 .data = &marvell_ac5_caps, 3139 }, 3140 { 3141 .compatible = "marvell,armada370-nand-controller", 3142 .data = &marvell_armada370_nfc_caps, 3143 }, 3144 { 3145 .compatible = "marvell,pxa3xx-nand-controller", 3146 .data = &marvell_pxa3xx_nfc_caps, 3147 }, 3148 /* Support for old/deprecated bindings: */ 3149 { 3150 .compatible = "marvell,armada-8k-nand", 3151 .data = 
&marvell_armada_8k_nfc_legacy_caps, 3152 }, 3153 { 3154 .compatible = "marvell,armada370-nand", 3155 .data = &marvell_armada370_nfc_legacy_caps, 3156 }, 3157 { 3158 .compatible = "marvell,pxa3xx-nand", 3159 .data = &marvell_pxa3xx_nfc_legacy_caps, 3160 }, 3161 { /* sentinel */ }, 3162 }; 3163 MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids); 3164 3165 static struct platform_driver marvell_nfc_driver = { 3166 .driver = { 3167 .name = "marvell-nfc", 3168 .of_match_table = marvell_nfc_of_ids, 3169 .pm = &marvell_nfc_pm_ops, 3170 }, 3171 .id_table = marvell_nfc_platform_ids, 3172 .probe = marvell_nfc_probe, 3173 .remove_new = marvell_nfc_remove, 3174 }; 3175 module_platform_driver(marvell_nfc_driver); 3176 3177 MODULE_LICENSE("GPL"); 3178 MODULE_DESCRIPTION("Marvell NAND controller driver"); 3179