1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright © 2010-2015 Broadcom Corporation 4 */ 5 6 #include <linux/clk.h> 7 #include <linux/version.h> 8 #include <linux/module.h> 9 #include <linux/init.h> 10 #include <linux/delay.h> 11 #include <linux/device.h> 12 #include <linux/platform_device.h> 13 #include <linux/err.h> 14 #include <linux/completion.h> 15 #include <linux/interrupt.h> 16 #include <linux/spinlock.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/ioport.h> 19 #include <linux/bug.h> 20 #include <linux/kernel.h> 21 #include <linux/bitops.h> 22 #include <linux/mm.h> 23 #include <linux/mtd/mtd.h> 24 #include <linux/mtd/rawnand.h> 25 #include <linux/mtd/partitions.h> 26 #include <linux/of.h> 27 #include <linux/of_platform.h> 28 #include <linux/slab.h> 29 #include <linux/list.h> 30 #include <linux/log2.h> 31 32 #include "brcmnand.h" 33 34 /* 35 * This flag controls if WP stays on between erase/write commands to mitigate 36 * flash corruption due to power glitches. Values: 37 * 0: NAND_WP is not used or not available 38 * 1: NAND_WP is set by default, cleared for erase/write operations 39 * 2: NAND_WP is always cleared 40 */ 41 static int wp_on = 1; 42 module_param(wp_on, int, 0444); 43 44 /*********************************************************************** 45 * Definitions 46 ***********************************************************************/ 47 48 #define DRV_NAME "brcmnand" 49 50 #define CMD_NULL 0x00 51 #define CMD_PAGE_READ 0x01 52 #define CMD_SPARE_AREA_READ 0x02 53 #define CMD_STATUS_READ 0x03 54 #define CMD_PROGRAM_PAGE 0x04 55 #define CMD_PROGRAM_SPARE_AREA 0x05 56 #define CMD_COPY_BACK 0x06 57 #define CMD_DEVICE_ID_READ 0x07 58 #define CMD_BLOCK_ERASE 0x08 59 #define CMD_FLASH_RESET 0x09 60 #define CMD_BLOCKS_LOCK 0x0a 61 #define CMD_BLOCKS_LOCK_DOWN 0x0b 62 #define CMD_BLOCKS_UNLOCK 0x0c 63 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d 64 #define CMD_PARAMETER_READ 0x0e 65 #define CMD_PARAMETER_CHANGE_COL 0x0f 66 #define CMD_LOW_LEVEL_OP 0x10 67 68 struct brcm_nand_dma_desc { 69 u32 next_desc; 70 u32 next_desc_ext; 71 u32 cmd_irq; 72 u32 dram_addr; 73 u32 dram_addr_ext; 74 u32 tfr_len; 75 u32 total_len; 76 u32 flash_addr; 77 u32 flash_addr_ext; 78 u32 cs; 79 u32 pad2[5]; 80 u32 status_valid; 81 } __packed; 82 83 /* Bitfields for brcm_nand_dma_desc::status_valid */ 84 #define FLASH_DMA_ECC_ERROR (1 << 8) 85 #define FLASH_DMA_CORR_ERROR (1 << 9) 86 87 /* 512B flash cache in the NAND controller HW */ 88 #define FC_SHIFT 9U 89 #define FC_BYTES 512U 90 #define FC_WORDS (FC_BYTES >> 2) 91 92 #define BRCMNAND_MIN_PAGESIZE 512 93 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024) 94 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) 95 96 #define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) 97 #define NAND_POLL_STATUS_TIMEOUT_MS 100 98 99 /* Controller feature flags */ 100 enum { 101 BRCMNAND_HAS_1K_SECTORS = BIT(0), 102 BRCMNAND_HAS_PREFETCH = BIT(1), 103 BRCMNAND_HAS_CACHE_MODE = BIT(2), 104 BRCMNAND_HAS_WP = BIT(3), 105 }; 106 107 struct brcmnand_controller { 108 struct device *dev; 109 struct nand_controller controller; 110 void __iomem *nand_base; 111 void __iomem *nand_fc; /* flash cache */ 112 void __iomem *flash_dma_base; 113 unsigned int irq; 114 unsigned int dma_irq; 115 int nand_version; 116 117 /* Some SoCs provide custom interrupt status register(s) */ 118 struct brcmnand_soc *soc; 119 120 /* Some SoCs have a gateable clock for the controller */ 121 struct clk *clk; 122 123 int cmd_pending; 124 bool dma_pending; 125 struct completion done; 126 struct completion dma_done; 127 128 /* List of NAND hosts (one for each chip-select) */ 129 struct list_head host_list; 130 131 struct brcm_nand_dma_desc *dma_desc; 132 dma_addr_t dma_pa; 133 134 /* in-memory cache of the FLASH_CACHE, used only for some commands */ 135 u8 flash_cache[FC_BYTES]; 136 137 /* Controller revision details */ 138 const u16 *reg_offsets; 139 unsigned int reg_spacing; /* between CS1, CS2, ... regs */ 140 const u8 *cs_offsets; /* within each chip-select */ 141 const u8 *cs0_offsets; /* within CS0, if different */ 142 unsigned int max_block_size; 143 const unsigned int *block_sizes; 144 unsigned int max_page_size; 145 const unsigned int *page_sizes; 146 unsigned int max_oob; 147 u32 features; 148 149 /* for low-power standby/resume only */ 150 u32 nand_cs_nand_select; 151 u32 nand_cs_nand_xor; 152 u32 corr_stat_threshold; 153 u32 flash_dma_mode; 154 }; 155 156 struct brcmnand_cfg { 157 u64 device_size; 158 unsigned int block_size; 159 unsigned int page_size; 160 unsigned int spare_area_size; 161 unsigned int device_width; 162 unsigned int col_adr_bytes; 163 unsigned int blk_adr_bytes; 164 unsigned int ful_adr_bytes; 165 unsigned int sector_size_1k; 166 unsigned int ecc_level; 167 /* use for low-power standby/resume only */ 168 u32 acc_control; 169 u32 config; 170 u32 config_ext; 171 u32 timing_1; 172 u32 timing_2; 173 }; 174 175 struct brcmnand_host { 176 struct list_head node; 177 178 struct nand_chip chip; 179 struct platform_device *pdev; 180 int cs; 181 182 unsigned int last_cmd; 183 unsigned int last_byte; 184 u64 last_addr; 185 struct brcmnand_cfg hwcfg; 186 struct brcmnand_controller *ctrl; 187 }; 188 189 enum brcmnand_reg { 190 BRCMNAND_CMD_START = 0, 191 BRCMNAND_CMD_EXT_ADDRESS, 192 BRCMNAND_CMD_ADDRESS, 193 BRCMNAND_INTFC_STATUS, 194 BRCMNAND_CS_SELECT, 195 BRCMNAND_CS_XOR, 196 BRCMNAND_LL_OP, 197 BRCMNAND_CS0_BASE, 198 BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */ 199 BRCMNAND_CORR_THRESHOLD, 200 BRCMNAND_CORR_THRESHOLD_EXT, 201 BRCMNAND_UNCORR_COUNT, 202 BRCMNAND_CORR_COUNT, 203 BRCMNAND_CORR_EXT_ADDR, 204 BRCMNAND_CORR_ADDR, 205 BRCMNAND_UNCORR_EXT_ADDR, 206 BRCMNAND_UNCORR_ADDR, 207 BRCMNAND_SEMAPHORE, 208 BRCMNAND_ID, 209 BRCMNAND_ID_EXT, 210 BRCMNAND_LL_RDATA, 211 BRCMNAND_OOB_READ_BASE, 212 BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */ 213 BRCMNAND_OOB_WRITE_BASE, 214 BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */ 215 BRCMNAND_FC_BASE, 216 }; 217 218 /* BRCMNAND v4.0 */ 219 static const u16 brcmnand_regs_v40[] = { 220 [BRCMNAND_CMD_START] = 0x04, 221 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 222 [BRCMNAND_CMD_ADDRESS] = 0x0c, 223 [BRCMNAND_INTFC_STATUS] = 0x6c, 224 [BRCMNAND_CS_SELECT] = 0x14, 225 [BRCMNAND_CS_XOR] = 0x18, 226 [BRCMNAND_LL_OP] = 0x178, 227 [BRCMNAND_CS0_BASE] = 0x40, 228 [BRCMNAND_CS1_BASE] = 0xd0, 229 [BRCMNAND_CORR_THRESHOLD] = 0x84, 230 [BRCMNAND_CORR_THRESHOLD_EXT] = 0, 231 [BRCMNAND_UNCORR_COUNT] = 0, 232 [BRCMNAND_CORR_COUNT] = 0, 233 [BRCMNAND_CORR_EXT_ADDR] = 0x70, 234 [BRCMNAND_CORR_ADDR] = 0x74, 235 [BRCMNAND_UNCORR_EXT_ADDR] = 0x78, 236 [BRCMNAND_UNCORR_ADDR] = 0x7c, 237 [BRCMNAND_SEMAPHORE] = 0x58, 238 [BRCMNAND_ID] = 0x60, 239 [BRCMNAND_ID_EXT] = 0x64, 240 [BRCMNAND_LL_RDATA] = 0x17c, 241 [BRCMNAND_OOB_READ_BASE] = 0x20, 242 [BRCMNAND_OOB_READ_10_BASE] = 0x130, 243 [BRCMNAND_OOB_WRITE_BASE] = 0x30, 244 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 245 [BRCMNAND_FC_BASE] = 0x200, 246 }; 247 248 /* BRCMNAND v5.0 */ 249 static const u16 brcmnand_regs_v50[] = { 250 [BRCMNAND_CMD_START] = 0x04, 251 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 252 [BRCMNAND_CMD_ADDRESS] = 0x0c, 253 [BRCMNAND_INTFC_STATUS] = 0x6c, 254 [BRCMNAND_CS_SELECT] = 0x14, 255 [BRCMNAND_CS_XOR] = 0x18, 256 [BRCMNAND_LL_OP] = 0x178, 257 [BRCMNAND_CS0_BASE] = 0x40, 258 [BRCMNAND_CS1_BASE] = 0xd0, 259 [BRCMNAND_CORR_THRESHOLD] = 0x84, 260 [BRCMNAND_CORR_THRESHOLD_EXT] = 0, 261 [BRCMNAND_UNCORR_COUNT] = 0, 262 [BRCMNAND_CORR_COUNT] = 0, 263 [BRCMNAND_CORR_EXT_ADDR] = 0x70, 264 [BRCMNAND_CORR_ADDR] = 0x74, 265 [BRCMNAND_UNCORR_EXT_ADDR] = 0x78, 266 [BRCMNAND_UNCORR_ADDR] = 0x7c, 267 [BRCMNAND_SEMAPHORE] = 0x58, 268 [BRCMNAND_ID] = 0x60, 269 [BRCMNAND_ID_EXT] = 0x64, 270 [BRCMNAND_LL_RDATA] = 0x17c, 271 [BRCMNAND_OOB_READ_BASE] = 0x20, 272 [BRCMNAND_OOB_READ_10_BASE] = 0x130, 273 [BRCMNAND_OOB_WRITE_BASE] = 0x30, 274 [BRCMNAND_OOB_WRITE_10_BASE] = 0x140, 275 [BRCMNAND_FC_BASE] = 0x200, 276 }; 277 278 /* BRCMNAND v6.0 - v7.1 */ 279 static const u16 brcmnand_regs_v60[] = { 280 [BRCMNAND_CMD_START] = 0x04, 281 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 282 [BRCMNAND_CMD_ADDRESS] = 0x0c, 283 [BRCMNAND_INTFC_STATUS] = 0x14, 284 [BRCMNAND_CS_SELECT] = 0x18, 285 [BRCMNAND_CS_XOR] = 0x1c, 286 [BRCMNAND_LL_OP] = 0x20, 287 [BRCMNAND_CS0_BASE] = 0x50, 288 [BRCMNAND_CS1_BASE] = 0, 289 [BRCMNAND_CORR_THRESHOLD] = 0xc0, 290 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4, 291 [BRCMNAND_UNCORR_COUNT] = 0xfc, 292 [BRCMNAND_CORR_COUNT] = 0x100, 293 [BRCMNAND_CORR_EXT_ADDR] = 0x10c, 294 [BRCMNAND_CORR_ADDR] = 0x110, 295 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, 296 [BRCMNAND_UNCORR_ADDR] = 0x118, 297 [BRCMNAND_SEMAPHORE] = 0x150, 298 [BRCMNAND_ID] = 0x194, 299 [BRCMNAND_ID_EXT] = 0x198, 300 [BRCMNAND_LL_RDATA] = 0x19c, 301 [BRCMNAND_OOB_READ_BASE] = 0x200, 302 [BRCMNAND_OOB_READ_10_BASE] = 0, 303 [BRCMNAND_OOB_WRITE_BASE] = 0x280, 304 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 305 [BRCMNAND_FC_BASE] = 0x400, 306 }; 307 308 /* BRCMNAND v7.1 */ 309 static const u16 brcmnand_regs_v71[] = { 310 [BRCMNAND_CMD_START] = 0x04, 311 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 312 [BRCMNAND_CMD_ADDRESS] = 0x0c, 313 [BRCMNAND_INTFC_STATUS] = 0x14, 314 [BRCMNAND_CS_SELECT] = 0x18, 315 [BRCMNAND_CS_XOR] = 0x1c, 316 [BRCMNAND_LL_OP] = 0x20, 317 [BRCMNAND_CS0_BASE] = 0x50, 318 [BRCMNAND_CS1_BASE] = 0, 319 [BRCMNAND_CORR_THRESHOLD] = 0xdc, 320 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, 321 [BRCMNAND_UNCORR_COUNT] = 0xfc, 322 [BRCMNAND_CORR_COUNT] = 0x100, 323 [BRCMNAND_CORR_EXT_ADDR] = 0x10c, 324 [BRCMNAND_CORR_ADDR] = 0x110, 325 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, 326 [BRCMNAND_UNCORR_ADDR] = 0x118, 327 [BRCMNAND_SEMAPHORE] = 0x150, 328 [BRCMNAND_ID] = 0x194, 329 [BRCMNAND_ID_EXT] = 0x198, 330 [BRCMNAND_LL_RDATA] = 0x19c, 331 [BRCMNAND_OOB_READ_BASE] = 0x200, 332 [BRCMNAND_OOB_READ_10_BASE] = 0, 333 [BRCMNAND_OOB_WRITE_BASE] = 0x280, 334 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 335 [BRCMNAND_FC_BASE] = 0x400, 336 }; 337 338 /* BRCMNAND v7.2 */ 339 static const u16 brcmnand_regs_v72[] = { 340 [BRCMNAND_CMD_START] = 0x04, 341 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 342 [BRCMNAND_CMD_ADDRESS] = 0x0c, 343 [BRCMNAND_INTFC_STATUS] = 0x14, 344 [BRCMNAND_CS_SELECT] = 0x18, 345 [BRCMNAND_CS_XOR] = 0x1c, 346 [BRCMNAND_LL_OP] = 0x20, 347 [BRCMNAND_CS0_BASE] = 0x50, 348 [BRCMNAND_CS1_BASE] = 0, 349 [BRCMNAND_CORR_THRESHOLD] = 0xdc, 350 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, 351 [BRCMNAND_UNCORR_COUNT] = 0xfc, 352 [BRCMNAND_CORR_COUNT] = 0x100, 353 [BRCMNAND_CORR_EXT_ADDR] = 0x10c, 354 [BRCMNAND_CORR_ADDR] = 0x110, 355 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, 356 [BRCMNAND_UNCORR_ADDR] = 0x118, 357 [BRCMNAND_SEMAPHORE] = 0x150, 358 [BRCMNAND_ID] = 0x194, 359 [BRCMNAND_ID_EXT] = 0x198, 360 [BRCMNAND_LL_RDATA] = 0x19c, 361 [BRCMNAND_OOB_READ_BASE] = 0x200, 362 [BRCMNAND_OOB_READ_10_BASE] = 0, 363 [BRCMNAND_OOB_WRITE_BASE] = 0x400, 364 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 365 [BRCMNAND_FC_BASE] = 0x600, 366 }; 367 368 enum brcmnand_cs_reg { 369 BRCMNAND_CS_CFG_EXT = 0, 370 BRCMNAND_CS_CFG, 371 BRCMNAND_CS_ACC_CONTROL, 372 BRCMNAND_CS_TIMING1, 373 BRCMNAND_CS_TIMING2, 374 }; 375 376 /* Per chip-select offsets for v7.1 */ 377 static const u8 brcmnand_cs_offsets_v71[] = { 378 [BRCMNAND_CS_ACC_CONTROL] = 0x00, 379 [BRCMNAND_CS_CFG_EXT] = 0x04, 380 [BRCMNAND_CS_CFG] = 0x08, 381 [BRCMNAND_CS_TIMING1] = 0x0c, 382 [BRCMNAND_CS_TIMING2] = 0x10, 383 }; 384 385 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */ 386 static const u8 brcmnand_cs_offsets[] = { 387 [BRCMNAND_CS_ACC_CONTROL] = 0x00, 388 [BRCMNAND_CS_CFG_EXT] = 0x04, 389 [BRCMNAND_CS_CFG] = 0x04, 390 [BRCMNAND_CS_TIMING1] = 0x08, 391 [BRCMNAND_CS_TIMING2] = 0x0c, 392 }; 393 394 /* Per chip-select offset for <= v5.0 on CS0 only */ 395 static const u8 brcmnand_cs_offsets_cs0[] = { 396 [BRCMNAND_CS_ACC_CONTROL] = 0x00, 397 [BRCMNAND_CS_CFG_EXT] = 0x08, 398 [BRCMNAND_CS_CFG] = 0x08, 399 [BRCMNAND_CS_TIMING1] = 0x10, 400 [BRCMNAND_CS_TIMING2] = 0x14, 401 }; 402 403 /* 404 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had 405 * one config register, but once the bitfields overflowed, newer controllers 406 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around. 407 */ 408 enum { 409 CFG_BLK_ADR_BYTES_SHIFT = 8, 410 CFG_COL_ADR_BYTES_SHIFT = 12, 411 CFG_FUL_ADR_BYTES_SHIFT = 16, 412 CFG_BUS_WIDTH_SHIFT = 23, 413 CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT), 414 CFG_DEVICE_SIZE_SHIFT = 24, 415 416 /* Only for pre-v7.1 (with no CFG_EXT register) */ 417 CFG_PAGE_SIZE_SHIFT = 20, 418 CFG_BLK_SIZE_SHIFT = 28, 419 420 /* Only for v7.1+ (with CFG_EXT register) */ 421 CFG_EXT_PAGE_SIZE_SHIFT = 0, 422 CFG_EXT_BLK_SIZE_SHIFT = 4, 423 }; 424 425 /* BRCMNAND_INTFC_STATUS */ 426 enum { 427 INTFC_FLASH_STATUS = GENMASK(7, 0), 428 429 INTFC_ERASED = BIT(27), 430 INTFC_OOB_VALID = BIT(28), 431 INTFC_CACHE_VALID = BIT(29), 432 INTFC_FLASH_READY = BIT(30), 433 INTFC_CTLR_READY = BIT(31), 434 }; 435 436 static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs) 437 { 438 return brcmnand_readl(ctrl->nand_base + offs); 439 } 440 441 static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs, 442 u32 val) 443 { 444 brcmnand_writel(val, ctrl->nand_base + offs); 445 } 446 447 static int brcmnand_revision_init(struct brcmnand_controller *ctrl) 448 { 449 static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 }; 450 static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 }; 451 static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 }; 452 453 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff; 454 455 /* Only support v4.0+? */ 456 if (ctrl->nand_version < 0x0400) { 457 dev_err(ctrl->dev, "version %#x not supported\n", 458 ctrl->nand_version); 459 return -ENODEV; 460 } 461 462 /* Register offsets */ 463 if (ctrl->nand_version >= 0x0702) 464 ctrl->reg_offsets = brcmnand_regs_v72; 465 else if (ctrl->nand_version >= 0x0701) 466 ctrl->reg_offsets = brcmnand_regs_v71; 467 else if (ctrl->nand_version >= 0x0600) 468 ctrl->reg_offsets = brcmnand_regs_v60; 469 else if (ctrl->nand_version >= 0x0500) 470 ctrl->reg_offsets = brcmnand_regs_v50; 471 else if (ctrl->nand_version >= 0x0400) 472 ctrl->reg_offsets = brcmnand_regs_v40; 473 474 /* Chip-select stride */ 475 if (ctrl->nand_version >= 0x0701) 476 ctrl->reg_spacing = 0x14; 477 else 478 ctrl->reg_spacing = 0x10; 479 480 /* Per chip-select registers */ 481 if (ctrl->nand_version >= 0x0701) { 482 ctrl->cs_offsets = brcmnand_cs_offsets_v71; 483 } else { 484 ctrl->cs_offsets = brcmnand_cs_offsets; 485 486 /* v5.0 and earlier has a different CS0 offset layout */ 487 if (ctrl->nand_version <= 0x0500) 488 ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; 489 } 490 491 /* Page / block sizes */ 492 if (ctrl->nand_version >= 0x0701) { 493 /* >= v7.1 use nice power-of-2 values! */ 494 ctrl->max_page_size = 16 * 1024; 495 ctrl->max_block_size = 2 * 1024 * 1024; 496 } else { 497 ctrl->page_sizes = page_sizes; 498 if (ctrl->nand_version >= 0x0600) 499 ctrl->block_sizes = block_sizes_v6; 500 else 501 ctrl->block_sizes = block_sizes_v4; 502 503 if (ctrl->nand_version < 0x0400) { 504 ctrl->max_page_size = 4096; 505 ctrl->max_block_size = 512 * 1024; 506 } 507 } 508 509 /* Maximum spare area sector size (per 512B) */ 510 if (ctrl->nand_version >= 0x0702) 511 ctrl->max_oob = 128; 512 else if (ctrl->nand_version >= 0x0600) 513 ctrl->max_oob = 64; 514 else if (ctrl->nand_version >= 0x0500) 515 ctrl->max_oob = 32; 516 else 517 ctrl->max_oob = 16; 518 519 /* v6.0 and newer (except v6.1) have prefetch support */ 520 if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601) 521 ctrl->features |= BRCMNAND_HAS_PREFETCH; 522 523 /* 524 * v6.x has cache mode, but it's implemented differently. Ignore it for 525 * now. 526 */ 527 if (ctrl->nand_version >= 0x0700) 528 ctrl->features |= BRCMNAND_HAS_CACHE_MODE; 529 530 if (ctrl->nand_version >= 0x0500) 531 ctrl->features |= BRCMNAND_HAS_1K_SECTORS; 532 533 if (ctrl->nand_version >= 0x0700) 534 ctrl->features |= BRCMNAND_HAS_WP; 535 else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp")) 536 ctrl->features |= BRCMNAND_HAS_WP; 537 538 return 0; 539 } 540 541 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl, 542 enum brcmnand_reg reg) 543 { 544 u16 offs = ctrl->reg_offsets[reg]; 545 546 if (offs) 547 return nand_readreg(ctrl, offs); 548 else 549 return 0; 550 } 551 552 static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl, 553 enum brcmnand_reg reg, u32 val) 554 { 555 u16 offs = ctrl->reg_offsets[reg]; 556 557 if (offs) 558 nand_writereg(ctrl, offs, val); 559 } 560 561 static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl, 562 enum brcmnand_reg reg, u32 mask, unsigned 563 int shift, u32 val) 564 { 565 u32 tmp = brcmnand_read_reg(ctrl, reg); 566 567 tmp &= ~mask; 568 tmp |= val << shift; 569 brcmnand_write_reg(ctrl, reg, tmp); 570 } 571 572 static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word) 573 { 574 return __raw_readl(ctrl->nand_fc + word * 4); 575 } 576 577 static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl, 578 int word, u32 val) 579 { 580 __raw_writel(val, ctrl->nand_fc + word * 4); 581 } 582 583 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs, 584 enum brcmnand_cs_reg reg) 585 { 586 u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE]; 587 u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE]; 588 u8 cs_offs; 589 590 if (cs == 0 && ctrl->cs0_offsets) 591 cs_offs = ctrl->cs0_offsets[reg]; 592 else 593 cs_offs = ctrl->cs_offsets[reg]; 594 595 if (cs && offs_cs1) 596 return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs; 597 598 return offs_cs0 + cs * ctrl->reg_spacing + cs_offs; 599 } 600 601 static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl) 602 { 603 if (ctrl->nand_version < 0x0600) 604 return 1; 605 return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT); 606 } 607 608 static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) 609 { 610 struct brcmnand_controller *ctrl = host->ctrl; 611 unsigned int shift = 0, bits; 612 enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; 613 int cs = host->cs; 614 615 if (ctrl->nand_version >= 0x0702) 616 bits = 7; 617 else if (ctrl->nand_version >= 0x0600) 618 bits = 6; 619 else if (ctrl->nand_version >= 0x0500) 620 bits = 5; 621 else 622 bits = 4; 623 624 if (ctrl->nand_version >= 0x0702) { 625 if (cs >= 4) 626 reg = BRCMNAND_CORR_THRESHOLD_EXT; 627 shift = (cs % 4) * bits; 628 } else if (ctrl->nand_version >= 0x0600) { 629 if (cs >= 5) 630 reg = BRCMNAND_CORR_THRESHOLD_EXT; 631 shift = (cs % 5) * bits; 632 } 633 brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val); 634 } 635 636 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) 637 { 638 if (ctrl->nand_version < 0x0602) 639 return 24; 640 return 0; 641 } 642 643 /*********************************************************************** 644 * NAND ACC CONTROL bitfield 645 * 646 * Some bits have remained constant throughout hardware revision, while 647 * others have shifted around. 648 ***********************************************************************/ 649 650 /* Constant for all versions (where supported) */ 651 enum { 652 /* See BRCMNAND_HAS_CACHE_MODE */ 653 ACC_CONTROL_CACHE_MODE = BIT(22), 654 655 /* See BRCMNAND_HAS_PREFETCH */ 656 ACC_CONTROL_PREFETCH = BIT(23), 657 658 ACC_CONTROL_PAGE_HIT = BIT(24), 659 ACC_CONTROL_WR_PREEMPT = BIT(25), 660 ACC_CONTROL_PARTIAL_PAGE = BIT(26), 661 ACC_CONTROL_RD_ERASED = BIT(27), 662 ACC_CONTROL_FAST_PGM_RDIN = BIT(28), 663 ACC_CONTROL_WR_ECC = BIT(30), 664 ACC_CONTROL_RD_ECC = BIT(31), 665 }; 666 667 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) 668 { 669 if (ctrl->nand_version >= 0x0702) 670 return GENMASK(7, 0); 671 else if (ctrl->nand_version >= 0x0600) 672 return GENMASK(6, 0); 673 else 674 return GENMASK(5, 0); 675 } 676 677 #define NAND_ACC_CONTROL_ECC_SHIFT 16 678 #define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 679 680 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) 681 { 682 u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; 683 684 mask <<= NAND_ACC_CONTROL_ECC_SHIFT; 685 686 /* v7.2 includes additional ECC levels */ 687 if (ctrl->nand_version >= 0x0702) 688 mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; 689 690 return mask; 691 } 692 693 static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) 694 { 695 struct brcmnand_controller *ctrl = host->ctrl; 696 u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); 697 u32 acc_control = nand_readreg(ctrl, offs); 698 u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC; 699 700 if (en) { 701 acc_control |= ecc_flags; /* enable RD/WR ECC */ 702 acc_control |= host->hwcfg.ecc_level 703 << NAND_ACC_CONTROL_ECC_SHIFT; 704 } else { 705 acc_control &= ~ecc_flags; /* disable RD/WR ECC */ 706 acc_control &= ~brcmnand_ecc_level_mask(ctrl); 707 } 708 709 nand_writereg(ctrl, offs, acc_control); 710 } 711 712 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) 713 { 714 if (ctrl->nand_version >= 0x0702) 715 return 9; 716 else if (ctrl->nand_version >= 0x0600) 717 return 7; 718 else if (ctrl->nand_version >= 0x0500) 719 return 6; 720 else 721 return -1; 722 } 723 724 static int brcmnand_get_sector_size_1k(struct brcmnand_host *host) 725 { 726 struct brcmnand_controller *ctrl = host->ctrl; 727 int shift = brcmnand_sector_1k_shift(ctrl); 728 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 729 BRCMNAND_CS_ACC_CONTROL); 730 731 if (shift < 0) 732 return 0; 733 734 return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1; 735 } 736 737 static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val) 738 { 739 struct brcmnand_controller *ctrl = host->ctrl; 740 int shift = brcmnand_sector_1k_shift(ctrl); 741 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 742 BRCMNAND_CS_ACC_CONTROL); 743 u32 tmp; 744 745 if (shift < 0) 746 return; 747 748 tmp = nand_readreg(ctrl, acc_control_offs); 749 tmp &= ~(1 << shift); 750 tmp |= (!!val) << shift; 751 nand_writereg(ctrl, acc_control_offs, tmp); 752 } 753 754 /*********************************************************************** 755 * CS_NAND_SELECT 756 ***********************************************************************/ 757 758 enum { 759 CS_SELECT_NAND_WP = BIT(29), 760 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), 761 }; 762 763 static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, 764 u32 mask, u32 expected_val, 765 unsigned long timeout_ms) 766 { 767 unsigned long limit; 768 u32 val; 769 770 if (!timeout_ms) 771 timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS; 772 773 limit = jiffies + msecs_to_jiffies(timeout_ms); 774 do { 775 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); 776 if ((val & mask) == expected_val) 777 return 0; 778 779 cpu_relax(); 780 } while (time_after(limit, jiffies)); 781 782 dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", 783 expected_val, val & mask); 784 785 return -ETIMEDOUT; 786 } 787 788 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) 789 { 790 u32 val = en ? CS_SELECT_NAND_WP : 0; 791 792 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val); 793 } 794 795 /*********************************************************************** 796 * Flash DMA 797 ***********************************************************************/ 798 799 enum flash_dma_reg { 800 FLASH_DMA_REVISION = 0x00, 801 FLASH_DMA_FIRST_DESC = 0x04, 802 FLASH_DMA_FIRST_DESC_EXT = 0x08, 803 FLASH_DMA_CTRL = 0x0c, 804 FLASH_DMA_MODE = 0x10, 805 FLASH_DMA_STATUS = 0x14, 806 FLASH_DMA_INTERRUPT_DESC = 0x18, 807 FLASH_DMA_INTERRUPT_DESC_EXT = 0x1c, 808 FLASH_DMA_ERROR_STATUS = 0x20, 809 FLASH_DMA_CURRENT_DESC = 0x24, 810 FLASH_DMA_CURRENT_DESC_EXT = 0x28, 811 }; 812 813 static inline bool has_flash_dma(struct brcmnand_controller *ctrl) 814 { 815 return ctrl->flash_dma_base; 816 } 817 818 static inline bool flash_dma_buf_ok(const void *buf) 819 { 820 return buf && !is_vmalloc_addr(buf) && 821 likely(IS_ALIGNED((uintptr_t)buf, 4)); 822 } 823 824 static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs, 825 u32 val) 826 { 827 brcmnand_writel(val, ctrl->flash_dma_base + offs); 828 } 829 830 static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs) 831 { 832 return brcmnand_readl(ctrl->flash_dma_base + offs); 833 } 834 835 /* Low-level operation types: command, address, write, or read */ 836 enum brcmnand_llop_type { 837 LL_OP_CMD, 838 LL_OP_ADDR, 839 LL_OP_WR, 840 LL_OP_RD, 841 }; 842 843 /*********************************************************************** 844 * Internal support functions 845 ***********************************************************************/ 846 847 static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl, 848 struct brcmnand_cfg *cfg) 849 { 850 if (ctrl->nand_version <= 0x0701) 851 return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && 852 cfg->ecc_level == 15; 853 else 854 return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 && 855 cfg->ecc_level == 15) || 856 (cfg->spare_area_size == 28 && cfg->ecc_level == 16)); 857 } 858 859 /* 860 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given 861 * the layout/configuration. 862 * Returns -ERRCODE on failure. 863 */ 864 static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section, 865 struct mtd_oob_region *oobregion) 866 { 867 struct nand_chip *chip = mtd_to_nand(mtd); 868 struct brcmnand_host *host = nand_get_controller_data(chip); 869 struct brcmnand_cfg *cfg = &host->hwcfg; 870 int sas = cfg->spare_area_size << cfg->sector_size_1k; 871 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 872 873 if (section >= sectors) 874 return -ERANGE; 875 876 oobregion->offset = (section * sas) + 6; 877 oobregion->length = 3; 878 879 return 0; 880 } 881 882 static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, 883 struct mtd_oob_region *oobregion) 884 { 885 struct nand_chip *chip = mtd_to_nand(mtd); 886 struct brcmnand_host *host = nand_get_controller_data(chip); 887 struct brcmnand_cfg *cfg = &host->hwcfg; 888 int sas = cfg->spare_area_size << cfg->sector_size_1k; 889 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 890 891 if (section >= sectors * 2) 892 return -ERANGE; 893 894 oobregion->offset = (section / 2) * sas; 895 896 if (section & 1) { 897 oobregion->offset += 9; 898 oobregion->length = 7; 899 } else { 900 oobregion->length = 6; 901 902 /* First sector of each page may have BBI */ 903 if (!section) { 904 /* 905 * Small-page NAND use byte 6 for BBI while large-page 906 * NAND use byte 0. 907 */ 908 if (cfg->page_size > 512) 909 oobregion->offset++; 910 oobregion->length--; 911 } 912 } 913 914 return 0; 915 } 916 917 static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = { 918 .ecc = brcmnand_hamming_ooblayout_ecc, 919 .free = brcmnand_hamming_ooblayout_free, 920 }; 921 922 static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section, 923 struct mtd_oob_region *oobregion) 924 { 925 struct nand_chip *chip = mtd_to_nand(mtd); 926 struct brcmnand_host *host = nand_get_controller_data(chip); 927 struct brcmnand_cfg *cfg = &host->hwcfg; 928 int sas = cfg->spare_area_size << cfg->sector_size_1k; 929 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 930 931 if (section >= sectors) 932 return -ERANGE; 933 934 oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes; 935 oobregion->length = chip->ecc.bytes; 936 937 return 0; 938 } 939 940 static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section, 941 struct mtd_oob_region *oobregion) 942 { 943 struct nand_chip *chip = mtd_to_nand(mtd); 944 struct brcmnand_host *host = nand_get_controller_data(chip); 945 struct brcmnand_cfg *cfg = &host->hwcfg; 946 int sas = cfg->spare_area_size << cfg->sector_size_1k; 947 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 948 949 if (section >= sectors) 950 return -ERANGE; 951 952 if (sas <= chip->ecc.bytes) 953 return 0; 954 955 oobregion->offset = section * sas; 956 oobregion->length = sas - chip->ecc.bytes; 957 958 if (!section) { 959 oobregion->offset++; 960 oobregion->length--; 961 } 962 963 return 0; 964 } 965 966 static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section, 967 struct mtd_oob_region *oobregion) 968 { 969 struct nand_chip *chip = mtd_to_nand(mtd); 970 struct brcmnand_host *host = nand_get_controller_data(chip); 971 struct brcmnand_cfg *cfg = &host->hwcfg; 972 int sas = cfg->spare_area_size << cfg->sector_size_1k; 973 974 if (section > 1 || sas - chip->ecc.bytes < 6 || 975 (section && sas - chip->ecc.bytes == 6)) 976 return -ERANGE; 977 978 if (!section) { 979 oobregion->offset = 0; 980 oobregion->length = 5; 981 } else { 982 oobregion->offset = 6; 983 oobregion->length = sas - chip->ecc.bytes - 6; 984 } 985 986 return 0; 987 } 988 989 static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = { 990 .ecc = brcmnand_bch_ooblayout_ecc, 991 .free = brcmnand_bch_ooblayout_free_lp, 992 }; 993 994 static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = { 995 .ecc = brcmnand_bch_ooblayout_ecc, 996 .free = brcmnand_bch_ooblayout_free_sp, 997 }; 998 999 static int brcmstb_choose_ecc_layout(struct brcmnand_host *host) 1000 { 1001 struct brcmnand_cfg *p = &host->hwcfg; 1002 struct mtd_info *mtd = nand_to_mtd(&host->chip); 1003 struct nand_ecc_ctrl *ecc = &host->chip.ecc; 1004 unsigned int ecc_level = p->ecc_level; 1005 int sas = p->spare_area_size << p->sector_size_1k; 1006 int sectors = p->page_size / (512 << p->sector_size_1k); 1007 1008 if (p->sector_size_1k) 1009 ecc_level <<= 1; 1010 1011 if (is_hamming_ecc(host->ctrl, p)) { 1012 ecc->bytes = 3 * sectors; 1013 mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); 1014 return 0; 1015 } 1016 1017 /* 1018 * CONTROLLER_VERSION: 1019 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8) 1020 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8) 1021 * But we will just be conservative. 1022 */ 1023 ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8); 1024 if (p->page_size == 512) 1025 mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops); 1026 else 1027 mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops); 1028 1029 if (ecc->bytes >= sas) { 1030 dev_err(&host->pdev->dev, 1031 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n", 1032 ecc->bytes, sas); 1033 return -EINVAL; 1034 } 1035 1036 return 0; 1037 } 1038 1039 static void brcmnand_wp(struct mtd_info *mtd, int wp) 1040 { 1041 struct nand_chip *chip = mtd_to_nand(mtd); 1042 struct brcmnand_host *host = nand_get_controller_data(chip); 1043 struct brcmnand_controller *ctrl = host->ctrl; 1044 1045 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { 1046 static int old_wp = -1; 1047 int ret; 1048 1049 if (old_wp != wp) { 1050 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); 1051 old_wp = wp; 1052 } 1053 1054 /* 1055 * make sure ctrl/flash ready before and after 1056 * changing state of #WP pin 1057 */ 1058 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | 1059 NAND_STATUS_READY, 1060 NAND_CTRL_RDY | 1061 NAND_STATUS_READY, 0); 1062 if (ret) 1063 return; 1064 1065 brcmnand_set_wp(ctrl, wp); 1066 nand_status_op(chip, NULL); 1067 /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ 1068 ret = bcmnand_ctrl_poll_status(ctrl, 1069 NAND_CTRL_RDY | 1070 NAND_STATUS_READY | 1071 NAND_STATUS_WP, 1072 NAND_CTRL_RDY | 1073 NAND_STATUS_READY | 1074 (wp ? 0 : NAND_STATUS_WP), 0); 1075 1076 if (ret) 1077 dev_err_ratelimited(&host->pdev->dev, 1078 "nand #WP expected %s\n", 1079 wp ? "on" : "off"); 1080 } 1081 } 1082 1083 /* Helper functions for reading and writing OOB registers */ 1084 static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs) 1085 { 1086 u16 offset0, offset10, reg_offs; 1087 1088 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE]; 1089 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE]; 1090 1091 if (offs >= ctrl->max_oob) 1092 return 0x77; 1093 1094 if (offs >= 16 && offset10) 1095 reg_offs = offset10 + ((offs - 0x10) & ~0x03); 1096 else 1097 reg_offs = offset0 + (offs & ~0x03); 1098 1099 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3)); 1100 } 1101 1102 static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs, 1103 u32 data) 1104 { 1105 u16 offset0, offset10, reg_offs; 1106 1107 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE]; 1108 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE]; 1109 1110 if (offs >= ctrl->max_oob) 1111 return; 1112 1113 if (offs >= 16 && offset10) 1114 reg_offs = offset10 + ((offs - 0x10) & ~0x03); 1115 else 1116 reg_offs = offset0 + (offs & ~0x03); 1117 1118 nand_writereg(ctrl, reg_offs, data); 1119 } 1120 1121 /* 1122 * read_oob_from_regs - read data from OOB registers 1123 * @ctrl: NAND controller 1124 * @i: sub-page sector index 1125 * @oob: buffer to read to 1126 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE) 1127 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal 1128 */ 1129 static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob, 1130 int sas, int sector_1k) 1131 { 1132 int tbytes = sas << sector_1k; 1133 int j; 1134 1135 /* Adjust OOB values for 1K sector size */ 1136 if (sector_1k && (i & 0x01)) 1137 tbytes = max(0, tbytes - (int)ctrl->max_oob); 1138 tbytes = min_t(int, tbytes, ctrl->max_oob); 1139 1140 for (j = 0; j < tbytes; j++) 1141 oob[j] = oob_reg_read(ctrl, j); 1142 return tbytes; 1143 } 1144 1145 /* 1146 * write_oob_to_regs - write data to OOB registers 1147 * @i: sub-page sector index 1148 * @oob: buffer to write from 1149 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE) 1150 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal 1151 */ 1152 static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, 1153 const u8 *oob, int sas, int sector_1k) 1154 { 1155 int tbytes = sas << sector_1k; 1156 int j; 1157 1158 /* Adjust OOB values for 1K sector size */ 1159 if (sector_1k && (i & 0x01)) 1160 tbytes = max(0, tbytes - (int)ctrl->max_oob); 1161 tbytes = min_t(int, tbytes, ctrl->max_oob); 1162 1163 for (j = 0; j < tbytes; j += 4) 1164 oob_reg_write(ctrl, j, 1165 (oob[j + 0] << 24) | 1166 (oob[j + 1] << 16) | 1167 (oob[j + 2] << 8) | 1168 (oob[j + 3] << 0)); 1169 return tbytes; 1170 } 1171 1172 static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data) 1173 { 1174 struct brcmnand_controller *ctrl = data; 1175 1176 /* Discard all NAND_CTLRDY interrupts during DMA */ 1177 if (ctrl->dma_pending) 1178 return IRQ_HANDLED; 1179 1180 complete(&ctrl->done); 1181 return IRQ_HANDLED; 1182 } 1183 1184 /* Handle SoC-specific interrupt hardware */ 1185 static irqreturn_t brcmnand_irq(int irq, void *data) 1186 { 1187 struct brcmnand_controller *ctrl = data; 1188 1189 if (ctrl->soc->ctlrdy_ack(ctrl->soc)) 1190 return brcmnand_ctlrdy_irq(irq, data); 1191 1192 return IRQ_NONE; 1193 } 1194 1195 static irqreturn_t brcmnand_dma_irq(int irq, void *data) 1196 { 1197 struct brcmnand_controller *ctrl = data; 1198 1199 complete(&ctrl->dma_done); 1200 1201 return IRQ_HANDLED; 1202 } 1203 1204 static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) 1205 { 1206 struct brcmnand_controller *ctrl = host->ctrl; 1207 int ret; 1208 1209 dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd, 1210 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS)); 1211 BUG_ON(ctrl->cmd_pending != 0); 1212 ctrl->cmd_pending = cmd; 1213 1214 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); 1215 WARN_ON(ret); 1216 1217 mb(); /* flush previous writes */ 1218 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, 1219 cmd << brcmnand_cmd_shift(ctrl)); 1220 } 1221 1222 /*********************************************************************** 1223 * NAND MTD API: read/program/erase 1224 ***********************************************************************/ 1225 1226 static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat, 1227 unsigned int ctrl) 1228 { 1229 /* intentionally left blank */ 1230 } 1231 1232 static int brcmnand_waitfunc(struct nand_chip *chip) 1233 { 1234 struct brcmnand_host *host = nand_get_controller_data(chip); 1235 struct brcmnand_controller *ctrl = host->ctrl; 1236 unsigned long timeo = msecs_to_jiffies(100); 1237 1238 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending); 1239 if (ctrl->cmd_pending && 1240 wait_for_completion_timeout(&ctrl->done, timeo) <= 0) { 1241 u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START) 1242 >> brcmnand_cmd_shift(ctrl); 1243 1244 dev_err_ratelimited(ctrl->dev, 1245 "timeout waiting for command %#02x\n", cmd); 1246 dev_err_ratelimited(ctrl->dev, "intfc status %08x\n", 1247 brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS)); 1248 } 1249 ctrl->cmd_pending = 0; 1250 return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & 1251 INTFC_FLASH_STATUS; 1252 } 1253 1254 enum { 1255 LLOP_RE = BIT(16), 1256 LLOP_WE = BIT(17), 1257 LLOP_ALE = BIT(18), 1258 LLOP_CLE = BIT(19), 1259 LLOP_RETURN_IDLE = BIT(31), 1260 1261 LLOP_DATA_MASK = GENMASK(15, 0), 1262 }; 1263 1264 static int brcmnand_low_level_op(struct brcmnand_host *host, 1265 enum brcmnand_llop_type type, u32 data, 1266 bool last_op) 1267 { 1268 struct nand_chip *chip = &host->chip; 1269 struct brcmnand_controller *ctrl = host->ctrl; 1270 u32 tmp; 1271 1272 tmp = data & LLOP_DATA_MASK; 1273 switch (type) { 1274 case LL_OP_CMD: 1275 tmp |= LLOP_WE | LLOP_CLE; 1276 break; 1277 case LL_OP_ADDR: 1278 /* WE | ALE */ 1279 tmp |= LLOP_WE | LLOP_ALE; 1280 break; 1281 case LL_OP_WR: 1282 /* WE */ 1283 tmp |= LLOP_WE; 1284 break; 1285 case LL_OP_RD: 1286 /* RE */ 1287 tmp |= LLOP_RE; 1288 break; 1289 } 1290 if (last_op) 1291 /* RETURN_IDLE */ 1292 tmp |= LLOP_RETURN_IDLE; 1293 1294 dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp); 1295 1296 brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp); 1297 (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP); 1298 1299 brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP); 1300 return brcmnand_waitfunc(chip); 1301 } 1302 1303 static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command, 1304 int column, int page_addr) 1305 { 1306 struct mtd_info *mtd = nand_to_mtd(chip); 1307 struct brcmnand_host *host = nand_get_controller_data(chip); 1308 struct brcmnand_controller *ctrl = host->ctrl; 1309 u64 addr = (u64)page_addr << chip->page_shift; 1310 int native_cmd = 0; 1311 1312 if (command == NAND_CMD_READID || command == NAND_CMD_PARAM || 1313 command == NAND_CMD_RNDOUT) 1314 addr = (u64)column; 1315 /* Avoid propagating a negative, don't-care address */ 1316 else if (page_addr < 0) 1317 addr = 0; 1318 1319 dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command, 1320 (unsigned long long)addr); 1321 1322 host->last_cmd = command; 1323 host->last_byte = 0; 1324 host->last_addr = addr; 1325 1326 switch (command) { 1327 case NAND_CMD_RESET: 1328 native_cmd = CMD_FLASH_RESET; 1329 break; 1330 case NAND_CMD_STATUS: 1331 native_cmd = CMD_STATUS_READ; 1332 break; 1333 case NAND_CMD_READID: 1334 native_cmd = CMD_DEVICE_ID_READ; 1335 break; 1336 case NAND_CMD_READOOB: 1337 native_cmd = CMD_SPARE_AREA_READ; 1338 break; 1339 case NAND_CMD_ERASE1: 1340 native_cmd = CMD_BLOCK_ERASE; 1341 brcmnand_wp(mtd, 0); 1342 break; 1343 case NAND_CMD_PARAM: 1344 native_cmd = CMD_PARAMETER_READ; 1345 break; 1346 case NAND_CMD_SET_FEATURES: 1347 case NAND_CMD_GET_FEATURES: 1348 brcmnand_low_level_op(host, LL_OP_CMD, command, false); 1349 brcmnand_low_level_op(host, LL_OP_ADDR, column, false); 1350 break; 1351 case NAND_CMD_RNDOUT: 1352 native_cmd = CMD_PARAMETER_CHANGE_COL; 1353 addr &= ~((u64)(FC_BYTES - 1)); 1354 /* 1355 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0 1356 * NB: hwcfg.sector_size_1k may not be initialized yet 1357 */ 1358 if (brcmnand_get_sector_size_1k(host)) { 1359 host->hwcfg.sector_size_1k = 1360 brcmnand_get_sector_size_1k(host); 1361 brcmnand_set_sector_size_1k(host, 0); 1362 } 1363 break; 1364 } 1365 1366 if (!native_cmd) 1367 return; 1368 1369 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, 1370 (host->cs << 16) | ((addr >> 32) & 0xffff)); 1371 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); 1372 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr)); 1373 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); 1374 1375 brcmnand_send_cmd(host, native_cmd); 1376 brcmnand_waitfunc(chip); 1377 1378 if (native_cmd == CMD_PARAMETER_READ || 1379 native_cmd == CMD_PARAMETER_CHANGE_COL) { 1380 /* Copy flash cache word-wise */ 1381 u32 *flash_cache = (u32 *)ctrl->flash_cache; 1382 int i; 1383 1384 brcmnand_soc_data_bus_prepare(ctrl->soc, true); 1385 1386 /* 1387 * Must cache the FLASH_CACHE now, since changes in 1388 * SECTOR_SIZE_1K may invalidate it 1389 */ 1390 for (i = 0; i < FC_WORDS; i++) 1391 /* 1392 * Flash cache is big endian for parameter pages, at 1393 * least on STB SoCs 1394 */ 1395 flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i)); 1396 1397 brcmnand_soc_data_bus_unprepare(ctrl->soc, true); 1398 1399 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */ 1400 if (host->hwcfg.sector_size_1k) 1401 brcmnand_set_sector_size_1k(host, 1402 host->hwcfg.sector_size_1k); 1403 } 1404 1405 /* Re-enable protection is necessary only after erase */ 1406 if (command == NAND_CMD_ERASE1) 1407 brcmnand_wp(mtd, 1); 1408 } 1409 1410 static uint8_t brcmnand_read_byte(struct nand_chip *chip) 1411 { 1412 struct brcmnand_host *host = nand_get_controller_data(chip); 1413 struct brcmnand_controller *ctrl = host->ctrl; 1414 uint8_t ret = 0; 1415 int addr, offs; 1416 1417 switch (host->last_cmd) { 1418 case NAND_CMD_READID: 1419 if (host->last_byte < 4) 1420 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >> 1421 (24 - (host->last_byte << 3)); 1422 else if (host->last_byte < 8) 1423 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >> 1424 (56 - (host->last_byte << 3)); 1425 break; 1426 1427 case NAND_CMD_READOOB: 1428 ret = oob_reg_read(ctrl, host->last_byte); 1429 break; 1430 1431 case NAND_CMD_STATUS: 1432 ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & 1433 INTFC_FLASH_STATUS; 1434 if (wp_on) /* hide WP status */ 1435 ret |= NAND_STATUS_WP; 1436 break; 1437 1438 case NAND_CMD_PARAM: 1439 case NAND_CMD_RNDOUT: 1440 addr = host->last_addr + host->last_byte; 1441 offs = addr & (FC_BYTES - 1); 1442 1443 /* At FC_BYTES boundary, switch to next column */ 1444 if (host->last_byte > 0 && offs == 0) 1445 nand_change_read_column_op(chip, addr, NULL, 0, false); 1446 1447 ret = ctrl->flash_cache[offs]; 1448 break; 1449 case NAND_CMD_GET_FEATURES: 1450 if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) { 1451 ret = 0; 1452 } else { 1453 bool last = host->last_byte == 1454 ONFI_SUBFEATURE_PARAM_LEN - 1; 1455 brcmnand_low_level_op(host, LL_OP_RD, 0, last); 1456 ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff; 1457 } 1458 } 1459 1460 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret); 1461 host->last_byte++; 1462 1463 return ret; 1464 } 1465 1466 static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len) 1467 { 1468 int i; 1469 1470 for (i = 0; i < len; i++, buf++) 1471 *buf = brcmnand_read_byte(chip); 1472 } 1473 1474 static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf, 1475 int len) 1476 { 1477 int i; 1478 struct brcmnand_host *host = nand_get_controller_data(chip); 1479 1480 switch (host->last_cmd) { 1481 case NAND_CMD_SET_FEATURES: 1482 for (i = 0; i < len; i++) 1483 brcmnand_low_level_op(host, LL_OP_WR, buf[i], 1484 (i + 1) == len); 1485 break; 1486 default: 1487 BUG(); 1488 break; 1489 } 1490 } 1491 1492 /** 1493 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the 1494 * following ahead of time: 1495 * - Is this descriptor the beginning or end of a linked list? 1496 * - What is the (DMA) address of the next descriptor in the linked list? 1497 */ 1498 static int brcmnand_fill_dma_desc(struct brcmnand_host *host, 1499 struct brcm_nand_dma_desc *desc, u64 addr, 1500 dma_addr_t buf, u32 len, u8 dma_cmd, 1501 bool begin, bool end, 1502 dma_addr_t next_desc) 1503 { 1504 memset(desc, 0, sizeof(*desc)); 1505 /* Descriptors are written in native byte order (wordwise) */ 1506 desc->next_desc = lower_32_bits(next_desc); 1507 desc->next_desc_ext = upper_32_bits(next_desc); 1508 desc->cmd_irq = (dma_cmd << 24) | 1509 (end ? (0x03 << 8) : 0) | /* IRQ | STOP */ 1510 (!!begin) | ((!!end) << 1); /* head, tail */ 1511 #ifdef CONFIG_CPU_BIG_ENDIAN 1512 desc->cmd_irq |= 0x01 << 12; 1513 #endif 1514 desc->dram_addr = lower_32_bits(buf); 1515 desc->dram_addr_ext = upper_32_bits(buf); 1516 desc->tfr_len = len; 1517 desc->total_len = len; 1518 desc->flash_addr = lower_32_bits(addr); 1519 desc->flash_addr_ext = upper_32_bits(addr); 1520 desc->cs = host->cs; 1521 desc->status_valid = 0x01; 1522 return 0; 1523 } 1524 1525 /** 1526 * Kick the FLASH_DMA engine, with a given DMA descriptor 1527 */ 1528 static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc) 1529 { 1530 struct brcmnand_controller *ctrl = host->ctrl; 1531 unsigned long timeo = msecs_to_jiffies(100); 1532 1533 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc)); 1534 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC); 1535 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc)); 1536 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT); 1537 1538 /* Start FLASH_DMA engine */ 1539 ctrl->dma_pending = true; 1540 mb(); /* flush previous writes */ 1541 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */ 1542 1543 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) { 1544 dev_err(ctrl->dev, 1545 "timeout waiting for DMA; status %#x, error status %#x\n", 1546 flash_dma_readl(ctrl, FLASH_DMA_STATUS), 1547 flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS)); 1548 } 1549 ctrl->dma_pending = false; 1550 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */ 1551 } 1552 1553 static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf, 1554 u32 len, u8 dma_cmd) 1555 { 1556 struct brcmnand_controller *ctrl = host->ctrl; 1557 dma_addr_t buf_pa; 1558 int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1559 1560 buf_pa = dma_map_single(ctrl->dev, buf, len, dir); 1561 if (dma_mapping_error(ctrl->dev, buf_pa)) { 1562 dev_err(ctrl->dev, "unable to map buffer for DMA\n"); 1563 return -ENOMEM; 1564 } 1565 1566 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len, 1567 dma_cmd, true, true, 0); 1568 1569 brcmnand_dma_run(host, ctrl->dma_pa); 1570 1571 dma_unmap_single(ctrl->dev, buf_pa, len, dir); 1572 1573 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR) 1574 return -EBADMSG; 1575 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR) 1576 return -EUCLEAN; 1577 1578 return 0; 1579 } 1580 1581 /* 1582 * Assumes proper CS is already set 1583 */ 1584 static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, 1585 u64 addr, unsigned int trans, u32 *buf, 1586 u8 *oob, u64 *err_addr) 1587 { 1588 struct brcmnand_host *host = nand_get_controller_data(chip); 1589 struct brcmnand_controller *ctrl = host->ctrl; 1590 int i, j, ret = 0; 1591 1592 /* Clear error addresses */ 1593 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0); 1594 brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0); 1595 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0); 1596 brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0); 1597 1598 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, 1599 (host->cs << 16) | ((addr >> 32) & 0xffff)); 1600 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); 1601 1602 for (i = 0; i < trans; i++, addr += FC_BYTES) { 1603 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, 1604 lower_32_bits(addr)); 1605 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); 1606 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */ 1607 brcmnand_send_cmd(host, CMD_PAGE_READ); 1608 brcmnand_waitfunc(chip); 1609 1610 if (likely(buf)) { 1611 brcmnand_soc_data_bus_prepare(ctrl->soc, false); 1612 1613 for (j = 0; j < FC_WORDS; j++, buf++) 1614 *buf = brcmnand_read_fc(ctrl, j); 1615 1616 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); 1617 } 1618 1619 if (oob) 1620 oob += read_oob_from_regs(ctrl, i, oob, 1621 mtd->oobsize / trans, 1622 host->hwcfg.sector_size_1k); 1623 1624 if (!ret) { 1625 *err_addr = brcmnand_read_reg(ctrl, 1626 BRCMNAND_UNCORR_ADDR) | 1627 ((u64)(brcmnand_read_reg(ctrl, 1628 BRCMNAND_UNCORR_EXT_ADDR) 1629 & 0xffff) << 32); 1630 if (*err_addr) 1631 ret = -EBADMSG; 1632 } 1633 1634 if (!ret) { 1635 *err_addr = brcmnand_read_reg(ctrl, 1636 BRCMNAND_CORR_ADDR) | 1637 ((u64)(brcmnand_read_reg(ctrl, 1638 BRCMNAND_CORR_EXT_ADDR) 1639 & 0xffff) << 32); 1640 if (*err_addr) 1641 ret = -EUCLEAN; 1642 } 1643 } 1644 1645 return ret; 1646 } 1647 1648 /* 1649 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC 1650 * error 1651 * 1652 * Because the HW ECC signals an ECC error if an erase paged has even a single 1653 * bitflip, we must check each ECC error to see if it is actually an erased 1654 * page with bitflips, not a truly corrupted page. 1655 * 1656 * On a real error, return a negative error code (-EBADMSG for ECC error), and 1657 * buf will contain raw data. 1658 * Otherwise, buf gets filled with 0xffs and return the maximum number of 1659 * bitflips-per-ECC-sector to the caller. 1660 * 1661 */ 1662 static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, 1663 struct nand_chip *chip, void *buf, u64 addr) 1664 { 1665 int i, sas; 1666 void *oob = chip->oob_poi; 1667 int bitflips = 0; 1668 int page = addr >> chip->page_shift; 1669 int ret; 1670 1671 if (!buf) 1672 buf = nand_get_data_buf(chip); 1673 1674 sas = mtd->oobsize / chip->ecc.steps; 1675 1676 /* read without ecc for verification */ 1677 ret = chip->ecc.read_page_raw(chip, buf, true, page); 1678 if (ret) 1679 return ret; 1680 1681 for (i = 0; i < chip->ecc.steps; i++, oob += sas) { 1682 ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, 1683 oob, sas, NULL, 0, 1684 chip->ecc.strength); 1685 if (ret < 0) 1686 return ret; 1687 1688 bitflips = max(bitflips, ret); 1689 } 1690 1691 return bitflips; 1692 } 1693 1694 static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, 1695 u64 addr, unsigned int trans, u32 *buf, u8 *oob) 1696 { 1697 struct brcmnand_host *host = nand_get_controller_data(chip); 1698 struct brcmnand_controller *ctrl = host->ctrl; 1699 u64 err_addr = 0; 1700 int err; 1701 bool retry = true; 1702 1703 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); 1704 1705 try_dmaread: 1706 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); 1707 1708 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { 1709 err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES, 1710 CMD_PAGE_READ); 1711 if (err) { 1712 if (mtd_is_bitflip_or_eccerr(err)) 1713 err_addr = addr; 1714 else 1715 return -EIO; 1716 } 1717 } else { 1718 if (oob) 1719 memset(oob, 0x99, mtd->oobsize); 1720 1721 err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf, 1722 oob, &err_addr); 1723 } 1724 1725 if (mtd_is_eccerr(err)) { 1726 /* 1727 * On controller version and 7.0, 7.1 , DMA read after a 1728 * prior PIO read that reported uncorrectable error, 1729 * the DMA engine captures this error following DMA read 1730 * cleared only on subsequent DMA read, so just retry once 1731 * to clear a possible false error reported for current DMA 1732 * read 1733 */ 1734 if ((ctrl->nand_version == 0x0700) || 1735 (ctrl->nand_version == 0x0701)) { 1736 if (retry) { 1737 retry = false; 1738 goto try_dmaread; 1739 } 1740 } 1741 1742 /* 1743 * Controller version 7.2 has hw encoder to detect erased page 1744 * bitflips, apply sw verification for older controllers only 1745 */ 1746 if (ctrl->nand_version < 0x0702) { 1747 err = brcmstb_nand_verify_erased_page(mtd, chip, buf, 1748 addr); 1749 /* erased page bitflips corrected */ 1750 if (err >= 0) 1751 return err; 1752 } 1753 1754 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", 1755 (unsigned long long)err_addr); 1756 mtd->ecc_stats.failed++; 1757 /* NAND layer expects zero on ECC errors */ 1758 return 0; 1759 } 1760 1761 if (mtd_is_bitflip(err)) { 1762 unsigned int corrected = brcmnand_count_corrected(ctrl); 1763 1764 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n", 1765 (unsigned long long)err_addr); 1766 mtd->ecc_stats.corrected += corrected; 1767 /* Always exceed the software-imposed threshold */ 1768 return max(mtd->bitflip_threshold, corrected); 1769 } 1770 1771 return 0; 1772 } 1773 1774 static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf, 1775 int oob_required, int page) 1776 { 1777 struct mtd_info *mtd = nand_to_mtd(chip); 1778 struct brcmnand_host *host = nand_get_controller_data(chip); 1779 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 1780 1781 nand_read_page_op(chip, page, 0, NULL, 0); 1782 1783 return brcmnand_read(mtd, chip, host->last_addr, 1784 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); 1785 } 1786 1787 static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf, 1788 int oob_required, int page) 1789 { 1790 struct brcmnand_host *host = nand_get_controller_data(chip); 1791 struct mtd_info *mtd = nand_to_mtd(chip); 1792 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 1793 int ret; 1794 1795 nand_read_page_op(chip, page, 0, NULL, 0); 1796 1797 brcmnand_set_ecc_enabled(host, 0); 1798 ret = brcmnand_read(mtd, chip, host->last_addr, 1799 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); 1800 brcmnand_set_ecc_enabled(host, 1); 1801 return ret; 1802 } 1803 1804 static int brcmnand_read_oob(struct nand_chip *chip, int page) 1805 { 1806 struct mtd_info *mtd = nand_to_mtd(chip); 1807 1808 return brcmnand_read(mtd, chip, (u64)page << chip->page_shift, 1809 mtd->writesize >> FC_SHIFT, 1810 NULL, (u8 *)chip->oob_poi); 1811 } 1812 1813 static int brcmnand_read_oob_raw(struct nand_chip *chip, int page) 1814 { 1815 struct mtd_info *mtd = nand_to_mtd(chip); 1816 struct brcmnand_host *host = nand_get_controller_data(chip); 1817 1818 brcmnand_set_ecc_enabled(host, 0); 1819 brcmnand_read(mtd, chip, (u64)page << chip->page_shift, 1820 mtd->writesize >> FC_SHIFT, 1821 NULL, (u8 *)chip->oob_poi); 1822 brcmnand_set_ecc_enabled(host, 1); 1823 return 0; 1824 } 1825 1826 static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip, 1827 u64 addr, const u32 *buf, u8 *oob) 1828 { 1829 struct brcmnand_host *host = nand_get_controller_data(chip); 1830 struct brcmnand_controller *ctrl = host->ctrl; 1831 unsigned int i, j, trans = mtd->writesize >> FC_SHIFT; 1832 int status, ret = 0; 1833 1834 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf); 1835 1836 if (unlikely((unsigned long)buf & 0x03)) { 1837 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf); 1838 buf = (u32 *)((unsigned long)buf & ~0x03); 1839 } 1840 1841 brcmnand_wp(mtd, 0); 1842 1843 for (i = 0; i < ctrl->max_oob; i += 4) 1844 oob_reg_write(ctrl, i, 0xffffffff); 1845 1846 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { 1847 if (brcmnand_dma_trans(host, addr, (u32 *)buf, 1848 mtd->writesize, CMD_PROGRAM_PAGE)) 1849 ret = -EIO; 1850 goto out; 1851 } 1852 1853 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, 1854 (host->cs << 16) | ((addr >> 32) & 0xffff)); 1855 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); 1856 1857 for (i = 0; i < trans; i++, addr += FC_BYTES) { 1858 /* full address MUST be set before populating FC */ 1859 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, 1860 lower_32_bits(addr)); 1861 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); 1862 1863 if (buf) { 1864 brcmnand_soc_data_bus_prepare(ctrl->soc, false); 1865 1866 for (j = 0; j < FC_WORDS; j++, buf++) 1867 brcmnand_write_fc(ctrl, j, *buf); 1868 1869 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); 1870 } else if (oob) { 1871 for (j = 0; j < FC_WORDS; j++) 1872 brcmnand_write_fc(ctrl, j, 0xffffffff); 1873 } 1874 1875 if (oob) { 1876 oob += write_oob_to_regs(ctrl, i, oob, 1877 mtd->oobsize / trans, 1878 host->hwcfg.sector_size_1k); 1879 } 1880 1881 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */ 1882 brcmnand_send_cmd(host, CMD_PROGRAM_PAGE); 1883 status = brcmnand_waitfunc(chip); 1884 1885 if (status & NAND_STATUS_FAIL) { 1886 dev_info(ctrl->dev, "program failed at %llx\n", 1887 (unsigned long long)addr); 1888 ret = -EIO; 1889 goto out; 1890 } 1891 } 1892 out: 1893 brcmnand_wp(mtd, 1); 1894 return ret; 1895 } 1896 1897 static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf, 1898 int oob_required, int page) 1899 { 1900 struct mtd_info *mtd = nand_to_mtd(chip); 1901 struct brcmnand_host *host = nand_get_controller_data(chip); 1902 void *oob = oob_required ? chip->oob_poi : NULL; 1903 1904 nand_prog_page_begin_op(chip, page, 0, NULL, 0); 1905 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 1906 1907 return nand_prog_page_end_op(chip); 1908 } 1909 1910 static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, 1911 int oob_required, int page) 1912 { 1913 struct mtd_info *mtd = nand_to_mtd(chip); 1914 struct brcmnand_host *host = nand_get_controller_data(chip); 1915 void *oob = oob_required ? chip->oob_poi : NULL; 1916 1917 nand_prog_page_begin_op(chip, page, 0, NULL, 0); 1918 brcmnand_set_ecc_enabled(host, 0); 1919 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 1920 brcmnand_set_ecc_enabled(host, 1); 1921 1922 return nand_prog_page_end_op(chip); 1923 } 1924 1925 static int brcmnand_write_oob(struct nand_chip *chip, int page) 1926 { 1927 return brcmnand_write(nand_to_mtd(chip), chip, 1928 (u64)page << chip->page_shift, NULL, 1929 chip->oob_poi); 1930 } 1931 1932 static int brcmnand_write_oob_raw(struct nand_chip *chip, int page) 1933 { 1934 struct mtd_info *mtd = nand_to_mtd(chip); 1935 struct brcmnand_host *host = nand_get_controller_data(chip); 1936 int ret; 1937 1938 brcmnand_set_ecc_enabled(host, 0); 1939 ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL, 1940 (u8 *)chip->oob_poi); 1941 brcmnand_set_ecc_enabled(host, 1); 1942 1943 return ret; 1944 } 1945 1946 /*********************************************************************** 1947 * Per-CS setup (1 NAND device) 1948 ***********************************************************************/ 1949 1950 static int brcmnand_set_cfg(struct brcmnand_host *host, 1951 struct brcmnand_cfg *cfg) 1952 { 1953 struct brcmnand_controller *ctrl = host->ctrl; 1954 struct nand_chip *chip = &host->chip; 1955 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 1956 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, 1957 BRCMNAND_CS_CFG_EXT); 1958 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 1959 BRCMNAND_CS_ACC_CONTROL); 1960 u8 block_size = 0, page_size = 0, device_size = 0; 1961 u32 tmp; 1962 1963 if (ctrl->block_sizes) { 1964 int i, found; 1965 1966 for (i = 0, found = 0; ctrl->block_sizes[i]; i++) 1967 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) { 1968 block_size = i; 1969 found = 1; 1970 } 1971 if (!found) { 1972 dev_warn(ctrl->dev, "invalid block size %u\n", 1973 cfg->block_size); 1974 return -EINVAL; 1975 } 1976 } else { 1977 block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE); 1978 } 1979 1980 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size && 1981 cfg->block_size > ctrl->max_block_size)) { 1982 dev_warn(ctrl->dev, "invalid block size %u\n", 1983 cfg->block_size); 1984 block_size = 0; 1985 } 1986 1987 if (ctrl->page_sizes) { 1988 int i, found; 1989 1990 for (i = 0, found = 0; ctrl->page_sizes[i]; i++) 1991 if (ctrl->page_sizes[i] == cfg->page_size) { 1992 page_size = i; 1993 found = 1; 1994 } 1995 if (!found) { 1996 dev_warn(ctrl->dev, "invalid page size %u\n", 1997 cfg->page_size); 1998 return -EINVAL; 1999 } 2000 } else { 2001 page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE); 2002 } 2003 2004 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size && 2005 cfg->page_size > ctrl->max_page_size)) { 2006 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size); 2007 return -EINVAL; 2008 } 2009 2010 if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) { 2011 dev_warn(ctrl->dev, "invalid device size 0x%llx\n", 2012 (unsigned long long)cfg->device_size); 2013 return -EINVAL; 2014 } 2015 device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE); 2016 2017 tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) | 2018 (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) | 2019 (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) | 2020 (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) | 2021 (device_size << CFG_DEVICE_SIZE_SHIFT); 2022 if (cfg_offs == cfg_ext_offs) { 2023 tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) | 2024 (block_size << CFG_BLK_SIZE_SHIFT); 2025 nand_writereg(ctrl, cfg_offs, tmp); 2026 } else { 2027 nand_writereg(ctrl, cfg_offs, tmp); 2028 tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) | 2029 (block_size << CFG_EXT_BLK_SIZE_SHIFT); 2030 nand_writereg(ctrl, cfg_ext_offs, tmp); 2031 } 2032 2033 tmp = nand_readreg(ctrl, acc_control_offs); 2034 tmp &= ~brcmnand_ecc_level_mask(ctrl); 2035 tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; 2036 tmp &= ~brcmnand_spare_area_mask(ctrl); 2037 tmp |= cfg->spare_area_size; 2038 nand_writereg(ctrl, acc_control_offs, tmp); 2039 2040 brcmnand_set_sector_size_1k(host, cfg->sector_size_1k); 2041 2042 /* threshold = ceil(BCH-level * 0.75) */ 2043 brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4)); 2044 2045 return 0; 2046 } 2047 2048 static void brcmnand_print_cfg(struct brcmnand_host *host, 2049 char *buf, struct brcmnand_cfg *cfg) 2050 { 2051 buf += sprintf(buf, 2052 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", 2053 (unsigned long long)cfg->device_size >> 20, 2054 cfg->block_size >> 10, 2055 cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size, 2056 cfg->page_size >= 1024 ? "KiB" : "B", 2057 cfg->spare_area_size, cfg->device_width); 2058 2059 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ 2060 if (is_hamming_ecc(host->ctrl, cfg)) 2061 sprintf(buf, ", Hamming ECC"); 2062 else if (cfg->sector_size_1k) 2063 sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); 2064 else 2065 sprintf(buf, ", BCH-%u", cfg->ecc_level); 2066 } 2067 2068 /* 2069 * Minimum number of bytes to address a page. Calculated as: 2070 * roundup(log2(size / page-size) / 8) 2071 * 2072 * NB: the following does not "round up" for non-power-of-2 'size'; but this is 2073 * OK because many other things will break if 'size' is irregular... 2074 */ 2075 static inline int get_blk_adr_bytes(u64 size, u32 writesize) 2076 { 2077 return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3; 2078 } 2079 2080 static int brcmnand_setup_dev(struct brcmnand_host *host) 2081 { 2082 struct mtd_info *mtd = nand_to_mtd(&host->chip); 2083 struct nand_chip *chip = &host->chip; 2084 struct brcmnand_controller *ctrl = host->ctrl; 2085 struct brcmnand_cfg *cfg = &host->hwcfg; 2086 char msg[128]; 2087 u32 offs, tmp, oob_sector; 2088 int ret; 2089 2090 memset(cfg, 0, sizeof(*cfg)); 2091 2092 ret = of_property_read_u32(nand_get_flash_node(chip), 2093 "brcm,nand-oob-sector-size", 2094 &oob_sector); 2095 if (ret) { 2096 /* Use detected size */ 2097 cfg->spare_area_size = mtd->oobsize / 2098 (mtd->writesize >> FC_SHIFT); 2099 } else { 2100 cfg->spare_area_size = oob_sector; 2101 } 2102 if (cfg->spare_area_size > ctrl->max_oob) 2103 cfg->spare_area_size = ctrl->max_oob; 2104 /* 2105 * Set oobsize to be consistent with controller's spare_area_size, as 2106 * the rest is inaccessible. 2107 */ 2108 mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT); 2109 2110 cfg->device_size = mtd->size; 2111 cfg->block_size = mtd->erasesize; 2112 cfg->page_size = mtd->writesize; 2113 cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8; 2114 cfg->col_adr_bytes = 2; 2115 cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize); 2116 2117 if (chip->ecc.mode != NAND_ECC_HW) { 2118 dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n", 2119 chip->ecc.mode); 2120 return -EINVAL; 2121 } 2122 2123 if (chip->ecc.algo == NAND_ECC_UNKNOWN) { 2124 if (chip->ecc.strength == 1 && chip->ecc.size == 512) 2125 /* Default to Hamming for 1-bit ECC, if unspecified */ 2126 chip->ecc.algo = NAND_ECC_HAMMING; 2127 else 2128 /* Otherwise, BCH */ 2129 chip->ecc.algo = NAND_ECC_BCH; 2130 } 2131 2132 if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 || 2133 chip->ecc.size != 512)) { 2134 dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n", 2135 chip->ecc.strength, chip->ecc.size); 2136 return -EINVAL; 2137 } 2138 2139 switch (chip->ecc.size) { 2140 case 512: 2141 if (chip->ecc.algo == NAND_ECC_HAMMING) 2142 cfg->ecc_level = 15; 2143 else 2144 cfg->ecc_level = chip->ecc.strength; 2145 cfg->sector_size_1k = 0; 2146 break; 2147 case 1024: 2148 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) { 2149 dev_err(ctrl->dev, "1KB sectors not supported\n"); 2150 return -EINVAL; 2151 } 2152 if (chip->ecc.strength & 0x1) { 2153 dev_err(ctrl->dev, 2154 "odd ECC not supported with 1KB sectors\n"); 2155 return -EINVAL; 2156 } 2157 2158 cfg->ecc_level = chip->ecc.strength >> 1; 2159 cfg->sector_size_1k = 1; 2160 break; 2161 default: 2162 dev_err(ctrl->dev, "unsupported ECC size: %d\n", 2163 chip->ecc.size); 2164 return -EINVAL; 2165 } 2166 2167 cfg->ful_adr_bytes = cfg->blk_adr_bytes; 2168 if (mtd->writesize > 512) 2169 cfg->ful_adr_bytes += cfg->col_adr_bytes; 2170 else 2171 cfg->ful_adr_bytes += 1; 2172 2173 ret = brcmnand_set_cfg(host, cfg); 2174 if (ret) 2175 return ret; 2176 2177 brcmnand_set_ecc_enabled(host, 1); 2178 2179 brcmnand_print_cfg(host, msg, cfg); 2180 dev_info(ctrl->dev, "detected %s\n", msg); 2181 2182 /* Configure ACC_CONTROL */ 2183 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); 2184 tmp = nand_readreg(ctrl, offs); 2185 tmp &= ~ACC_CONTROL_PARTIAL_PAGE; 2186 tmp &= ~ACC_CONTROL_RD_ERASED; 2187 2188 /* We need to turn on Read from erased paged protected by ECC */ 2189 if (ctrl->nand_version >= 0x0702) 2190 tmp |= ACC_CONTROL_RD_ERASED; 2191 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; 2192 if (ctrl->features & BRCMNAND_HAS_PREFETCH) 2193 tmp &= ~ACC_CONTROL_PREFETCH; 2194 2195 nand_writereg(ctrl, offs, tmp); 2196 2197 return 0; 2198 } 2199 2200 static int brcmnand_attach_chip(struct nand_chip *chip) 2201 { 2202 struct mtd_info *mtd = nand_to_mtd(chip); 2203 struct brcmnand_host *host = nand_get_controller_data(chip); 2204 int ret; 2205 2206 chip->options |= NAND_NO_SUBPAGE_WRITE; 2207 /* 2208 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA 2209 * to/from, and have nand_base pass us a bounce buffer instead, as 2210 * needed. 2211 */ 2212 chip->options |= NAND_USE_BOUNCE_BUFFER; 2213 2214 if (chip->bbt_options & NAND_BBT_USE_FLASH) 2215 chip->bbt_options |= NAND_BBT_NO_OOB; 2216 2217 if (brcmnand_setup_dev(host)) 2218 return -ENXIO; 2219 2220 chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512; 2221 2222 /* only use our internal HW threshold */ 2223 mtd->bitflip_threshold = 1; 2224 2225 ret = brcmstb_choose_ecc_layout(host); 2226 2227 return ret; 2228 } 2229 2230 static const struct nand_controller_ops brcmnand_controller_ops = { 2231 .attach_chip = brcmnand_attach_chip, 2232 }; 2233 2234 static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn) 2235 { 2236 struct brcmnand_controller *ctrl = host->ctrl; 2237 struct platform_device *pdev = host->pdev; 2238 struct mtd_info *mtd; 2239 struct nand_chip *chip; 2240 int ret; 2241 u16 cfg_offs; 2242 2243 ret = of_property_read_u32(dn, "reg", &host->cs); 2244 if (ret) { 2245 dev_err(&pdev->dev, "can't get chip-select\n"); 2246 return -ENXIO; 2247 } 2248 2249 mtd = nand_to_mtd(&host->chip); 2250 chip = &host->chip; 2251 2252 nand_set_flash_node(chip, dn); 2253 nand_set_controller_data(chip, host); 2254 mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d", 2255 host->cs); 2256 if (!mtd->name) 2257 return -ENOMEM; 2258 2259 mtd->owner = THIS_MODULE; 2260 mtd->dev.parent = &pdev->dev; 2261 2262 chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl; 2263 chip->legacy.cmdfunc = brcmnand_cmdfunc; 2264 chip->legacy.waitfunc = brcmnand_waitfunc; 2265 chip->legacy.read_byte = brcmnand_read_byte; 2266 chip->legacy.read_buf = brcmnand_read_buf; 2267 chip->legacy.write_buf = brcmnand_write_buf; 2268 2269 chip->ecc.mode = NAND_ECC_HW; 2270 chip->ecc.read_page = brcmnand_read_page; 2271 chip->ecc.write_page = brcmnand_write_page; 2272 chip->ecc.read_page_raw = brcmnand_read_page_raw; 2273 chip->ecc.write_page_raw = brcmnand_write_page_raw; 2274 chip->ecc.write_oob_raw = brcmnand_write_oob_raw; 2275 chip->ecc.read_oob_raw = brcmnand_read_oob_raw; 2276 chip->ecc.read_oob = brcmnand_read_oob; 2277 chip->ecc.write_oob = brcmnand_write_oob; 2278 2279 chip->controller = &ctrl->controller; 2280 2281 /* 2282 * The bootloader might have configured 16bit mode but 2283 * NAND READID command only works in 8bit mode. We force 2284 * 8bit mode here to ensure that NAND READID commands works. 2285 */ 2286 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2287 nand_writereg(ctrl, cfg_offs, 2288 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH); 2289 2290 ret = nand_scan(chip, 1); 2291 if (ret) 2292 return ret; 2293 2294 ret = mtd_device_register(mtd, NULL, 0); 2295 if (ret) 2296 nand_cleanup(chip); 2297 2298 return ret; 2299 } 2300 2301 static void brcmnand_save_restore_cs_config(struct brcmnand_host *host, 2302 int restore) 2303 { 2304 struct brcmnand_controller *ctrl = host->ctrl; 2305 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2306 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, 2307 BRCMNAND_CS_CFG_EXT); 2308 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 2309 BRCMNAND_CS_ACC_CONTROL); 2310 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1); 2311 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2); 2312 2313 if (restore) { 2314 nand_writereg(ctrl, cfg_offs, host->hwcfg.config); 2315 if (cfg_offs != cfg_ext_offs) 2316 nand_writereg(ctrl, cfg_ext_offs, 2317 host->hwcfg.config_ext); 2318 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control); 2319 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1); 2320 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2); 2321 } else { 2322 host->hwcfg.config = nand_readreg(ctrl, cfg_offs); 2323 if (cfg_offs != cfg_ext_offs) 2324 host->hwcfg.config_ext = 2325 nand_readreg(ctrl, cfg_ext_offs); 2326 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs); 2327 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs); 2328 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs); 2329 } 2330 } 2331 2332 static int brcmnand_suspend(struct device *dev) 2333 { 2334 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); 2335 struct brcmnand_host *host; 2336 2337 list_for_each_entry(host, &ctrl->host_list, node) 2338 brcmnand_save_restore_cs_config(host, 0); 2339 2340 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT); 2341 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR); 2342 ctrl->corr_stat_threshold = 2343 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD); 2344 2345 if (has_flash_dma(ctrl)) 2346 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE); 2347 2348 return 0; 2349 } 2350 2351 static int brcmnand_resume(struct device *dev) 2352 { 2353 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); 2354 struct brcmnand_host *host; 2355 2356 if (has_flash_dma(ctrl)) { 2357 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode); 2358 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); 2359 } 2360 2361 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select); 2362 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor); 2363 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD, 2364 ctrl->corr_stat_threshold); 2365 if (ctrl->soc) { 2366 /* Clear/re-enable interrupt */ 2367 ctrl->soc->ctlrdy_ack(ctrl->soc); 2368 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); 2369 } 2370 2371 list_for_each_entry(host, &ctrl->host_list, node) { 2372 struct nand_chip *chip = &host->chip; 2373 2374 brcmnand_save_restore_cs_config(host, 1); 2375 2376 /* Reset the chip, required by some chips after power-up */ 2377 nand_reset_op(chip); 2378 } 2379 2380 return 0; 2381 } 2382 2383 const struct dev_pm_ops brcmnand_pm_ops = { 2384 .suspend = brcmnand_suspend, 2385 .resume = brcmnand_resume, 2386 }; 2387 EXPORT_SYMBOL_GPL(brcmnand_pm_ops); 2388 2389 static const struct of_device_id brcmnand_of_match[] = { 2390 { .compatible = "brcm,brcmnand-v4.0" }, 2391 { .compatible = "brcm,brcmnand-v5.0" }, 2392 { .compatible = "brcm,brcmnand-v6.0" }, 2393 { .compatible = "brcm,brcmnand-v6.1" }, 2394 { .compatible = "brcm,brcmnand-v6.2" }, 2395 { .compatible = "brcm,brcmnand-v7.0" }, 2396 { .compatible = "brcm,brcmnand-v7.1" }, 2397 { .compatible = "brcm,brcmnand-v7.2" }, 2398 {}, 2399 }; 2400 MODULE_DEVICE_TABLE(of, brcmnand_of_match); 2401 2402 /*********************************************************************** 2403 * Platform driver setup (per controller) 2404 ***********************************************************************/ 2405 2406 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) 2407 { 2408 struct device *dev = &pdev->dev; 2409 struct device_node *dn = dev->of_node, *child; 2410 struct brcmnand_controller *ctrl; 2411 struct resource *res; 2412 int ret; 2413 2414 /* We only support device-tree instantiation */ 2415 if (!dn) 2416 return -ENODEV; 2417 2418 if (!of_match_node(brcmnand_of_match, dn)) 2419 return -ENODEV; 2420 2421 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); 2422 if (!ctrl) 2423 return -ENOMEM; 2424 2425 dev_set_drvdata(dev, ctrl); 2426 ctrl->dev = dev; 2427 2428 init_completion(&ctrl->done); 2429 init_completion(&ctrl->dma_done); 2430 nand_controller_init(&ctrl->controller); 2431 ctrl->controller.ops = &brcmnand_controller_ops; 2432 INIT_LIST_HEAD(&ctrl->host_list); 2433 2434 /* NAND register range */ 2435 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2436 ctrl->nand_base = devm_ioremap_resource(dev, res); 2437 if (IS_ERR(ctrl->nand_base)) 2438 return PTR_ERR(ctrl->nand_base); 2439 2440 /* Enable clock before using NAND registers */ 2441 ctrl->clk = devm_clk_get(dev, "nand"); 2442 if (!IS_ERR(ctrl->clk)) { 2443 ret = clk_prepare_enable(ctrl->clk); 2444 if (ret) 2445 return ret; 2446 } else { 2447 ret = PTR_ERR(ctrl->clk); 2448 if (ret == -EPROBE_DEFER) 2449 return ret; 2450 2451 ctrl->clk = NULL; 2452 } 2453 2454 /* Initialize NAND revision */ 2455 ret = brcmnand_revision_init(ctrl); 2456 if (ret) 2457 goto err; 2458 2459 /* 2460 * Most chips have this cache at a fixed offset within 'nand' block. 2461 * Some must specify this region separately. 2462 */ 2463 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache"); 2464 if (res) { 2465 ctrl->nand_fc = devm_ioremap_resource(dev, res); 2466 if (IS_ERR(ctrl->nand_fc)) { 2467 ret = PTR_ERR(ctrl->nand_fc); 2468 goto err; 2469 } 2470 } else { 2471 ctrl->nand_fc = ctrl->nand_base + 2472 ctrl->reg_offsets[BRCMNAND_FC_BASE]; 2473 } 2474 2475 /* FLASH_DMA */ 2476 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma"); 2477 if (res) { 2478 ctrl->flash_dma_base = devm_ioremap_resource(dev, res); 2479 if (IS_ERR(ctrl->flash_dma_base)) { 2480 ret = PTR_ERR(ctrl->flash_dma_base); 2481 goto err; 2482 } 2483 2484 flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */ 2485 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); 2486 2487 /* Allocate descriptor(s) */ 2488 ctrl->dma_desc = dmam_alloc_coherent(dev, 2489 sizeof(*ctrl->dma_desc), 2490 &ctrl->dma_pa, GFP_KERNEL); 2491 if (!ctrl->dma_desc) { 2492 ret = -ENOMEM; 2493 goto err; 2494 } 2495 2496 ctrl->dma_irq = platform_get_irq(pdev, 1); 2497 if ((int)ctrl->dma_irq < 0) { 2498 dev_err(dev, "missing FLASH_DMA IRQ\n"); 2499 ret = -ENODEV; 2500 goto err; 2501 } 2502 2503 ret = devm_request_irq(dev, ctrl->dma_irq, 2504 brcmnand_dma_irq, 0, DRV_NAME, 2505 ctrl); 2506 if (ret < 0) { 2507 dev_err(dev, "can't allocate IRQ %d: error %d\n", 2508 ctrl->dma_irq, ret); 2509 goto err; 2510 } 2511 2512 dev_info(dev, "enabling FLASH_DMA\n"); 2513 } 2514 2515 /* Disable automatic device ID config, direct addressing */ 2516 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, 2517 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0); 2518 /* Disable XOR addressing */ 2519 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0); 2520 2521 if (ctrl->features & BRCMNAND_HAS_WP) { 2522 /* Permanently disable write protection */ 2523 if (wp_on == 2) 2524 brcmnand_set_wp(ctrl, false); 2525 } else { 2526 wp_on = 0; 2527 } 2528 2529 /* IRQ */ 2530 ctrl->irq = platform_get_irq(pdev, 0); 2531 if ((int)ctrl->irq < 0) { 2532 dev_err(dev, "no IRQ defined\n"); 2533 ret = -ENODEV; 2534 goto err; 2535 } 2536 2537 /* 2538 * Some SoCs integrate this controller (e.g., its interrupt bits) in 2539 * interesting ways 2540 */ 2541 if (soc) { 2542 ctrl->soc = soc; 2543 2544 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0, 2545 DRV_NAME, ctrl); 2546 2547 /* Enable interrupt */ 2548 ctrl->soc->ctlrdy_ack(ctrl->soc); 2549 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); 2550 } else { 2551 /* Use standard interrupt infrastructure */ 2552 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0, 2553 DRV_NAME, ctrl); 2554 } 2555 if (ret < 0) { 2556 dev_err(dev, "can't allocate IRQ %d: error %d\n", 2557 ctrl->irq, ret); 2558 goto err; 2559 } 2560 2561 for_each_available_child_of_node(dn, child) { 2562 if (of_device_is_compatible(child, "brcm,nandcs")) { 2563 struct brcmnand_host *host; 2564 2565 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 2566 if (!host) { 2567 of_node_put(child); 2568 ret = -ENOMEM; 2569 goto err; 2570 } 2571 host->pdev = pdev; 2572 host->ctrl = ctrl; 2573 2574 ret = brcmnand_init_cs(host, child); 2575 if (ret) { 2576 devm_kfree(dev, host); 2577 continue; /* Try all chip-selects */ 2578 } 2579 2580 list_add_tail(&host->node, &ctrl->host_list); 2581 } 2582 } 2583 2584 /* No chip-selects could initialize properly */ 2585 if (list_empty(&ctrl->host_list)) { 2586 ret = -ENODEV; 2587 goto err; 2588 } 2589 2590 return 0; 2591 2592 err: 2593 clk_disable_unprepare(ctrl->clk); 2594 return ret; 2595 2596 } 2597 EXPORT_SYMBOL_GPL(brcmnand_probe); 2598 2599 int brcmnand_remove(struct platform_device *pdev) 2600 { 2601 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); 2602 struct brcmnand_host *host; 2603 2604 list_for_each_entry(host, &ctrl->host_list, node) 2605 nand_release(&host->chip); 2606 2607 clk_disable_unprepare(ctrl->clk); 2608 2609 dev_set_drvdata(&pdev->dev, NULL); 2610 2611 return 0; 2612 } 2613 EXPORT_SYMBOL_GPL(brcmnand_remove); 2614 2615 MODULE_LICENSE("GPL v2"); 2616 MODULE_AUTHOR("Kevin Cernekee"); 2617 MODULE_AUTHOR("Brian Norris"); 2618 MODULE_DESCRIPTION("NAND driver for Broadcom chips"); 2619 MODULE_ALIAS("platform:brcmnand"); 2620