// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2010-2015 Broadcom Corporation
 */

#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/log2.h>

#include "brcmnand.h"

/*
 * This flag controls if WP stays on between erase/write commands to mitigate
 * flash corruption due to power glitches. Values:
 * 0: NAND_WP is not used or not available
 * 1: NAND_WP is set by default, cleared for erase/write operations
 * 2: NAND_WP is always cleared
 */
static int wp_on = 1;
module_param(wp_on, int, 0444);

/***********************************************************************
 * Definitions
 ***********************************************************************/

#define DRV_NAME			"brcmnand"

#define CMD_NULL			0x00
#define CMD_PAGE_READ			0x01
#define CMD_SPARE_AREA_READ		0x02
#define CMD_STATUS_READ			0x03
#define CMD_PROGRAM_PAGE		0x04
#define CMD_PROGRAM_SPARE_AREA		0x05
#define CMD_COPY_BACK			0x06
#define CMD_DEVICE_ID_READ		0x07
#define CMD_BLOCK_ERASE			0x08
#define CMD_FLASH_RESET			0x09
#define CMD_BLOCKS_LOCK			0x0a
#define CMD_BLOCKS_LOCK_DOWN		0x0b
#define CMD_BLOCKS_UNLOCK		0x0c
#define CMD_READ_BLOCKS_LOCK_STATUS	0x0d
#define CMD_PARAMETER_READ		0x0e
#define CMD_PARAMETER_CHANGE_COL	0x0f
#define CMD_LOW_LEVEL_OP		0x10

struct brcm_nand_dma_desc {
	u32 next_desc;
	u32 next_desc_ext;
	u32 cmd_irq;
	u32 dram_addr;
	u32 dram_addr_ext;
	u32 tfr_len;
	u32 total_len;
	u32 flash_addr;
	u32 flash_addr_ext;
	u32 cs;
	u32 pad2[5];
	u32 status_valid;
} __packed;

/* Bitfields for brcm_nand_dma_desc::status_valid */
#define FLASH_DMA_ECC_ERROR	(1 << 8)
#define FLASH_DMA_CORR_ERROR	(1 << 9)

/* Bitfields for DMA_MODE */
#define FLASH_DMA_MODE_STOP_ON_ERROR	BIT(1) /* stop on uncorrectable ECC error */
#define FLASH_DMA_MODE_MODE		BIT(0) /* linked-list mode */
#define FLASH_DMA_MODE_MASK		(FLASH_DMA_MODE_STOP_ON_ERROR |	\
						FLASH_DMA_MODE_MODE)

/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT	9U
#define FC_BYTES	512U
#define FC_WORDS	(FC_BYTES >> 2)

#define BRCMNAND_MIN_PAGESIZE	512
#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)

#define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS	100

/* flash_dma registers */
enum flash_dma_reg {
	FLASH_DMA_REVISION = 0,
	FLASH_DMA_FIRST_DESC,
	FLASH_DMA_FIRST_DESC_EXT,
	FLASH_DMA_CTRL,
	FLASH_DMA_MODE,
	FLASH_DMA_STATUS,
	FLASH_DMA_INTERRUPT_DESC,
	FLASH_DMA_INTERRUPT_DESC_EXT,
	FLASH_DMA_ERROR_STATUS,
	FLASH_DMA_CURRENT_DESC,
	FLASH_DMA_CURRENT_DESC_EXT,
};
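/*
 * Each supported flash-dma revision gets its own offset table below, indexed
 * by the logical enum above. For example (illustrative only): resolving
 * FLASH_DMA_CTRL on a v4 block reads flash_dma_regs_v4[FLASH_DMA_CTRL],
 * i.e. 0x10, so the access becomes flash_dma_base + 0x10; see
 * flash_dma_readl()/flash_dma_writel() for the lookup.
 */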
/* flash_dma registers v1 */
static const u16 flash_dma_regs_v1[] = {
	[FLASH_DMA_REVISION]		= 0x00,
	[FLASH_DMA_FIRST_DESC]		= 0x04,
	[FLASH_DMA_FIRST_DESC_EXT]	= 0x08,
	[FLASH_DMA_CTRL]		= 0x0c,
	[FLASH_DMA_MODE]		= 0x10,
	[FLASH_DMA_STATUS]		= 0x14,
	[FLASH_DMA_INTERRUPT_DESC]	= 0x18,
	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x1c,
	[FLASH_DMA_ERROR_STATUS]	= 0x20,
	[FLASH_DMA_CURRENT_DESC]	= 0x24,
	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x28,
};

/* flash_dma registers v4 */
static const u16 flash_dma_regs_v4[] = {
	[FLASH_DMA_REVISION]		= 0x00,
	[FLASH_DMA_FIRST_DESC]		= 0x08,
	[FLASH_DMA_FIRST_DESC_EXT]	= 0x0c,
	[FLASH_DMA_CTRL]		= 0x10,
	[FLASH_DMA_MODE]		= 0x14,
	[FLASH_DMA_STATUS]		= 0x18,
	[FLASH_DMA_INTERRUPT_DESC]	= 0x20,
	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x24,
	[FLASH_DMA_ERROR_STATUS]	= 0x28,
	[FLASH_DMA_CURRENT_DESC]	= 0x30,
	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x34,
};

/* Controller feature flags */
enum {
	BRCMNAND_HAS_1K_SECTORS		= BIT(0),
	BRCMNAND_HAS_PREFETCH		= BIT(1),
	BRCMNAND_HAS_CACHE_MODE		= BIT(2),
	BRCMNAND_HAS_WP			= BIT(3),
};

struct brcmnand_controller {
	struct device		*dev;
	struct nand_controller	controller;
	void __iomem		*nand_base;
	void __iomem		*nand_fc;	/* flash cache */
	void __iomem		*flash_dma_base;
	unsigned int		irq;
	unsigned int		dma_irq;
	int			nand_version;

	/* Some SoCs provide custom interrupt status register(s) */
	struct brcmnand_soc	*soc;

	/* Some SoCs have a gateable clock for the controller */
	struct clk		*clk;

	int			cmd_pending;
	bool			dma_pending;
	struct completion	done;
	struct completion	dma_done;

	/* List of NAND hosts (one for each chip-select) */
	struct list_head	host_list;

	/* flash_dma reg */
	const u16		*flash_dma_offsets;
	struct brcm_nand_dma_desc *dma_desc;
	dma_addr_t		dma_pa;

	/* in-memory cache of the FLASH_CACHE, used only for some commands */
	u8			flash_cache[FC_BYTES];

	/* Controller revision details */
	const u16		*reg_offsets;
	unsigned int		reg_spacing; /* between CS1, CS2, ... regs */
	const u8		*cs_offsets; /* within each chip-select */
	const u8		*cs0_offsets; /* within CS0, if different */
	unsigned int		max_block_size;
	const unsigned int	*block_sizes;
	unsigned int		max_page_size;
	const unsigned int	*page_sizes;
	unsigned int		max_oob;
	u32			features;

	/* for low-power standby/resume only */
	u32			nand_cs_nand_select;
	u32			nand_cs_nand_xor;
	u32			corr_stat_threshold;
	u32			flash_dma_mode;
	bool			pio_poll_mode;
};

struct brcmnand_cfg {
	u64			device_size;
	unsigned int		block_size;
	unsigned int		page_size;
	unsigned int		spare_area_size;
	unsigned int		device_width;
	unsigned int		col_adr_bytes;
	unsigned int		blk_adr_bytes;
	unsigned int		ful_adr_bytes;
	unsigned int		sector_size_1k;
	unsigned int		ecc_level;
	/* use for low-power standby/resume only */
	u32			acc_control;
	u32			config;
	u32			config_ext;
	u32			timing_1;
	u32			timing_2;
};

struct brcmnand_host {
	struct list_head	node;

	struct nand_chip	chip;
	struct platform_device	*pdev;
	int			cs;

	unsigned int		last_cmd;
	unsigned int		last_byte;
	u64			last_addr;
	struct brcmnand_cfg	hwcfg;
	struct brcmnand_controller *ctrl;
};

enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_LL_OP,
	BRCMNAND_CS0_BASE,
	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
	BRCMNAND_CORR_THRESHOLD,
	BRCMNAND_CORR_THRESHOLD_EXT,
	BRCMNAND_UNCORR_COUNT,
	BRCMNAND_CORR_COUNT,
	BRCMNAND_CORR_EXT_ADDR,
	BRCMNAND_CORR_ADDR,
	BRCMNAND_UNCORR_EXT_ADDR,
	BRCMNAND_UNCORR_ADDR,
	BRCMNAND_SEMAPHORE,
	BRCMNAND_ID,
	BRCMNAND_ID_EXT,
	BRCMNAND_LL_RDATA,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_OOB_WRITE_BASE,
	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_FC_BASE,
};

/* BRCMNAND v4.0 */
static const u16 brcmnand_regs_v40[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x6c,
	[BRCMNAND_CS_SELECT]		= 0x14,
	[BRCMNAND_CS_XOR]		= 0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		= 0x40,
	[BRCMNAND_CS1_BASE]		= 0xd0,
	[BRCMNAND_CORR_THRESHOLD]	= 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0,
	[BRCMNAND_UNCORR_COUNT]		= 0,
	[BRCMNAND_CORR_COUNT]		= 0,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x70,
	[BRCMNAND_CORR_ADDR]		= 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x78,
	[BRCMNAND_UNCORR_ADDR]		= 0x7c,
	[BRCMNAND_SEMAPHORE]		= 0x58,
	[BRCMNAND_ID]			= 0x60,
	[BRCMNAND_ID_EXT]		= 0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	= 0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x200,
};

/* BRCMNAND v5.0 */
static const u16 brcmnand_regs_v50[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x6c,
	[BRCMNAND_CS_SELECT]		= 0x14,
	[BRCMNAND_CS_XOR]		= 0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		= 0x40,
	[BRCMNAND_CS1_BASE]		= 0xd0,
	[BRCMNAND_CORR_THRESHOLD]	= 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0,
	[BRCMNAND_UNCORR_COUNT]		= 0,
	[BRCMNAND_CORR_COUNT]		= 0,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x70,
	[BRCMNAND_CORR_ADDR]		= 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x78,
	[BRCMNAND_UNCORR_ADDR]		= 0x7c,
	[BRCMNAND_SEMAPHORE]		= 0x58,
	[BRCMNAND_ID]			= 0x60,
	[BRCMNAND_ID_EXT]		= 0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	= 0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0x140,
	[BRCMNAND_FC_BASE]		= 0x200,
};

/* BRCMNAND v6.0 - v7.1 */
static const u16 brcmnand_regs_v60[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x14,
	[BRCMNAND_CS_SELECT]		= 0x18,
	[BRCMNAND_CS_XOR]		= 0x1c,
	[BRCMNAND_LL_OP]		= 0x20,
	[BRCMNAND_CS0_BASE]		= 0x50,
	[BRCMNAND_CS1_BASE]		= 0,
	[BRCMNAND_CORR_THRESHOLD]	= 0xc0,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0xc4,
	[BRCMNAND_UNCORR_COUNT]		= 0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	= 0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x400,
};

/* BRCMNAND v7.1 */
static const u16 brcmnand_regs_v71[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x14,
	[BRCMNAND_CS_SELECT]		= 0x18,
	[BRCMNAND_CS_XOR]		= 0x1c,
	[BRCMNAND_LL_OP]		= 0x20,
	[BRCMNAND_CS0_BASE]		= 0x50,
	[BRCMNAND_CS1_BASE]		= 0,
	[BRCMNAND_CORR_THRESHOLD]	= 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0xe0,
	[BRCMNAND_UNCORR_COUNT]		= 0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	= 0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x400,
};

/* BRCMNAND v7.2 */
static const u16 brcmnand_regs_v72[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x14,
	[BRCMNAND_CS_SELECT]		= 0x18,
	[BRCMNAND_CS_XOR]		= 0x1c,
	[BRCMNAND_LL_OP]		= 0x20,
	[BRCMNAND_CS0_BASE]		= 0x50,
	[BRCMNAND_CS1_BASE]		= 0,
	[BRCMNAND_CORR_THRESHOLD]	= 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0xe0,
	[BRCMNAND_UNCORR_COUNT]		= 0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	= 0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x400,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x600,
};
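/*
 * Each chip-select owns a small block of configuration registers (CFG,
 * ACC_CONTROL, TIMING1/2). The block for CS n normally lives at
 * CS0_BASE + n * reg_spacing, with the per-register offsets below added on
 * top; see brcmnand_cs_offset() for the exact lookup, including the CS1 and
 * CS0 special cases.
 */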
enum brcmnand_cs_reg {
	BRCMNAND_CS_CFG_EXT = 0,
	BRCMNAND_CS_CFG,
	BRCMNAND_CS_ACC_CONTROL,
	BRCMNAND_CS_TIMING1,
	BRCMNAND_CS_TIMING2,
};

/* Per chip-select offsets for v7.1 */
static const u8 brcmnand_cs_offsets_v71[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x0c,
	[BRCMNAND_CS_TIMING2]		= 0x10,
};

/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
static const u8 brcmnand_cs_offsets[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x04,
	[BRCMNAND_CS_TIMING1]		= 0x08,
	[BRCMNAND_CS_TIMING2]		= 0x0c,
};

/* Per chip-select offset for <= v5.0 on CS0 only */
static const u8 brcmnand_cs_offsets_cs0[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x08,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x10,
	[BRCMNAND_CS_TIMING2]		= 0x14,
};

/*
 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
 * one config register, but once the bitfields overflowed, newer controllers
 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
 */
enum {
	CFG_BLK_ADR_BYTES_SHIFT		= 8,
	CFG_COL_ADR_BYTES_SHIFT		= 12,
	CFG_FUL_ADR_BYTES_SHIFT		= 16,
	CFG_BUS_WIDTH_SHIFT		= 23,
	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
	CFG_DEVICE_SIZE_SHIFT		= 24,

	/* Only for pre-v7.1 (with no CFG_EXT register) */
	CFG_PAGE_SIZE_SHIFT		= 20,
	CFG_BLK_SIZE_SHIFT		= 28,

	/* Only for v7.1+ (with CFG_EXT register) */
	CFG_EXT_PAGE_SIZE_SHIFT		= 0,
	CFG_EXT_BLK_SIZE_SHIFT		= 4,
};

/* BRCMNAND_INTFC_STATUS */
enum {
	INTFC_FLASH_STATUS		= GENMASK(7, 0),

	INTFC_ERASED			= BIT(27),
	INTFC_OOB_VALID			= BIT(28),
	INTFC_CACHE_VALID		= BIT(29),
	INTFC_FLASH_READY		= BIT(30),
	INTFC_CTLR_READY		= BIT(31),
};

static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
	return brcmnand_readl(ctrl->nand_base + offs);
}

static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
				 u32 val)
{
	brcmnand_writel(val, ctrl->nand_base + offs);
}

static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
	static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };

	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;

	/* Only support v4.0+? */
	if (ctrl->nand_version < 0x0400) {
		dev_err(ctrl->dev, "version %#x not supported\n",
			ctrl->nand_version);
		return -ENODEV;
	}

	/* Register offsets */
	if (ctrl->nand_version >= 0x0702)
		ctrl->reg_offsets = brcmnand_regs_v72;
	else if (ctrl->nand_version == 0x0701)
		ctrl->reg_offsets = brcmnand_regs_v71;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->reg_offsets = brcmnand_regs_v60;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->reg_offsets = brcmnand_regs_v50;
	else if (ctrl->nand_version >= 0x0400)
		ctrl->reg_offsets = brcmnand_regs_v40;

	/* Chip-select stride */
	if (ctrl->nand_version >= 0x0701)
		ctrl->reg_spacing = 0x14;
	else
		ctrl->reg_spacing = 0x10;

	/* Per chip-select registers */
	if (ctrl->nand_version >= 0x0701) {
		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
	} else {
		ctrl->cs_offsets = brcmnand_cs_offsets;

		/* v5.0 and earlier has a different CS0 offset layout */
		if (ctrl->nand_version <= 0x0500)
			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
	}

	/* Page / block sizes */
	if (ctrl->nand_version >= 0x0701) {
		/* >= v7.1 use nice power-of-2 values! */
		ctrl->max_page_size = 16 * 1024;
		ctrl->max_block_size = 2 * 1024 * 1024;
	} else {
		ctrl->page_sizes = page_sizes;
		if (ctrl->nand_version >= 0x0600)
			ctrl->block_sizes = block_sizes_v6;
		else
			ctrl->block_sizes = block_sizes_v4;

		if (ctrl->nand_version < 0x0400) {
			ctrl->max_page_size = 4096;
			ctrl->max_block_size = 512 * 1024;
		}
	}

	/* Maximum spare area sector size (per 512B) */
	if (ctrl->nand_version == 0x0702)
		ctrl->max_oob = 128;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->max_oob = 64;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->max_oob = 32;
	else
		ctrl->max_oob = 16;

	/* v6.0 and newer (except v6.1) have prefetch support */
	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
		ctrl->features |= BRCMNAND_HAS_PREFETCH;

	/*
	 * v6.x has cache mode, but it's implemented differently. Ignore it for
	 * now.
	 */
	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;

	if (ctrl->nand_version >= 0x0500)
		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;

	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_WP;
	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
		ctrl->features |= BRCMNAND_HAS_WP;

	return 0;
}

static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
{
	/* flash_dma register offsets */
	if (ctrl->nand_version >= 0x0703)
		ctrl->flash_dma_offsets = flash_dma_regs_v4;
	else
		ctrl->flash_dma_offsets = flash_dma_regs_v1;
}

static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		return nand_readreg(ctrl, offs);
	else
		return 0;
}

static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
				      enum brcmnand_reg reg, u32 val)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		nand_writereg(ctrl, offs, val);
}

static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg, u32 mask,
				    unsigned int shift, u32 val)
{
	u32 tmp = brcmnand_read_reg(ctrl, reg);

	tmp &= ~mask;
	tmp |= val << shift;
	brcmnand_write_reg(ctrl, reg, tmp);
}

static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
	return __raw_readl(ctrl->nand_fc + word * 4);
}

static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
				     int word, u32 val)
{
	__raw_writel(val, ctrl->nand_fc + word * 4);
}

static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
	/* Clear error addresses */
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
}

static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_UNCORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}

static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_CORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}

static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			   (host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
			   lower_32_bits(addr));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
}
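/*
 * Worked example (illustrative): on v7.1, reading ACC_CONTROL for CS2 uses
 * CS0_BASE (0x50) + 2 * reg_spacing (0x14) + cs_offsets[ACC_CONTROL] (0x00),
 * i.e. 0x78, since v7.1 has no separate CS1 register block (CS1_BASE is 0).
 */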
static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
				     enum brcmnand_cs_reg reg)
{
	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
	u8 cs_offs;

	if (cs == 0 && ctrl->cs0_offsets)
		cs_offs = ctrl->cs0_offsets[reg];
	else
		cs_offs = ctrl->cs_offsets[reg];

	if (cs && offs_cs1)
		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;

	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}

static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0600)
		return 1;
	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
}

static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int shift = 0, bits;
	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
	int cs = host->cs;

	if (ctrl->nand_version == 0x0702)
		bits = 7;
	else if (ctrl->nand_version >= 0x0600)
		bits = 6;
	else if (ctrl->nand_version >= 0x0500)
		bits = 5;
	else
		bits = 4;

	if (ctrl->nand_version >= 0x0702) {
		if (cs >= 4)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 4) * bits;
	} else if (ctrl->nand_version >= 0x0600) {
		if (cs >= 5)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 5) * bits;
	}
	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
}

static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0602)
		return 24;
	return 0;
}

/***********************************************************************
 * NAND ACC CONTROL bitfield
 *
 * Some bits have remained constant throughout hardware revision, while
 * others have shifted around.
 ***********************************************************************/

/* Constant for all versions (where supported) */
enum {
	/* See BRCMNAND_HAS_CACHE_MODE */
	ACC_CONTROL_CACHE_MODE		= BIT(22),

	/* See BRCMNAND_HAS_PREFETCH */
	ACC_CONTROL_PREFETCH		= BIT(23),

	ACC_CONTROL_PAGE_HIT		= BIT(24),
	ACC_CONTROL_WR_PREEMPT		= BIT(25),
	ACC_CONTROL_PARTIAL_PAGE	= BIT(26),
	ACC_CONTROL_RD_ERASED		= BIT(27),
	ACC_CONTROL_FAST_PGM_RDIN	= BIT(28),
	ACC_CONTROL_WR_ECC		= BIT(30),
	ACC_CONTROL_RD_ECC		= BIT(31),
};

static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version == 0x0702)
		return GENMASK(7, 0);
	else if (ctrl->nand_version >= 0x0600)
		return GENMASK(6, 0);
	else
		return GENMASK(5, 0);
}

#define NAND_ACC_CONTROL_ECC_SHIFT	16
#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13
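/*
 * The ECC level field sits at bits 20:16 of ACC_CONTROL (bits 19:16 before
 * v6.0). v7.2 grew the field further by adding extension bits at 15:13,
 * which is why the mask below is assembled from two pieces.
 */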
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;

	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;

	/* v7.2 includes additional ECC levels */
	if (ctrl->nand_version >= 0x0702)
		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;

	return mask;
}

static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	u32 acc_control = nand_readreg(ctrl, offs);
	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;

	if (en) {
		acc_control |= ecc_flags; /* enable RD/WR ECC */
		acc_control |= host->hwcfg.ecc_level
			       << NAND_ACC_CONTROL_ECC_SHIFT;
	} else {
		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
	}

	nand_writereg(ctrl, offs, acc_control);
}

static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version >= 0x0702)
		return 9;
	else if (ctrl->nand_version >= 0x0600)
		return 7;
	else if (ctrl->nand_version >= 0x0500)
		return 6;
	else
		return -1;
}

static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);

	if (shift < 0)
		return 0;

	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
}

static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u32 tmp;

	if (shift < 0)
		return;

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~(1 << shift);
	tmp |= (!!val) << shift;
	nand_writereg(ctrl, acc_control_offs, tmp);
}

/***********************************************************************
 * CS_NAND_SELECT
 ***********************************************************************/

enum {
	CS_SELECT_NAND_WP		= BIT(29),
	CS_SELECT_AUTO_DEVICE_ID_CFG	= BIT(30),
};

static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
				    u32 mask, u32 expected_val,
				    unsigned long timeout_ms)
{
	unsigned long limit;
	u32 val;

	if (!timeout_ms)
		timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
		if ((val & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));

	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
		 expected_val, val & mask);

	return -ETIMEDOUT;
}
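/* en = true asserts #WP (device protected); en = false releases it */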
static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
	u32 val = en ? CS_SELECT_NAND_WP : 0;

	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
}

/***********************************************************************
 * Flash DMA
 ***********************************************************************/

static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
	return ctrl->flash_dma_base;
}

static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
	if (ctrl->pio_poll_mode)
		return;

	if (has_flash_dma(ctrl)) {
		ctrl->flash_dma_base = 0;
		disable_irq(ctrl->dma_irq);
	}

	disable_irq(ctrl->irq);
	ctrl->pio_poll_mode = true;
}

static inline bool flash_dma_buf_ok(const void *buf)
{
	return buf && !is_vmalloc_addr(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, 4));
}

static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
				    enum flash_dma_reg dma_reg, u32 val)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	brcmnand_writel(val, ctrl->flash_dma_base + offs);
}

static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
				  enum flash_dma_reg dma_reg)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	return brcmnand_readl(ctrl->flash_dma_base + offs);
}

/* Low-level operation types: command, address, write, or read */
enum brcmnand_llop_type {
	LL_OP_CMD,
	LL_OP_ADDR,
	LL_OP_WR,
	LL_OP_RD,
};

/***********************************************************************
 * Internal support functions
 ***********************************************************************/

static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
				  struct brcmnand_cfg *cfg)
{
	if (ctrl->nand_version <= 0x0701)
		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15;
	else
		return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15) ||
			(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}

/*
 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
 * the layout/configuration.
 * Returns -ERRCODE on failure.
 */
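/*
 * Example (illustrative): a 2KiB page with 512B sectors and 16B spare per
 * sector has sectors = 4 and sas = 16, so the Hamming ECC bytes land 3 at a
 * time at offset 6 within each 16-byte spare region, i.e. OOB offsets 6..8,
 * 22..24, 38..40 and 54..56.
 */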
static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = (section * sas) + 6;
	oobregion->length = 3;

	return 0;
}

static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
					   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors * 2)
		return -ERANGE;

	oobregion->offset = (section / 2) * sas;

	if (section & 1) {
		oobregion->offset += 9;
		oobregion->length = 7;
	} else {
		oobregion->length = 6;

		/* First sector of each page may have BBI */
		if (!section) {
			/*
			 * Small-page NAND uses byte 6 for BBI while large-page
			 * NAND uses byte 0.
			 */
			if (cfg->page_size > 512)
				oobregion->offset++;
			oobregion->length--;
		}
	}

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
	.ecc = brcmnand_hamming_ooblayout_ecc,
	.free = brcmnand_hamming_ooblayout_free,
};

static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
	oobregion->length = chip->ecc.bytes;

	return 0;
}
static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	if (sas <= chip->ecc.bytes)
		return 0;

	oobregion->offset = section * sas;
	oobregion->length = sas - chip->ecc.bytes;

	if (!section) {
		oobregion->offset++;
		oobregion->length--;
	}

	return 0;
}

static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;

	if (section > 1 || sas - chip->ecc.bytes < 6 ||
	    (section && sas - chip->ecc.bytes == 6))
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else {
		oobregion->offset = 6;
		oobregion->length = sas - chip->ecc.bytes - 6;
	}

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_lp,
};

static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_sp,
};

static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
{
	struct brcmnand_cfg *p = &host->hwcfg;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
	unsigned int ecc_level = p->ecc_level;
	int sas = p->spare_area_size << p->sector_size_1k;
	int sectors = p->page_size / (512 << p->sector_size_1k);

	if (p->sector_size_1k)
		ecc_level <<= 1;

	if (is_hamming_ecc(host->ctrl, p)) {
		ecc->bytes = 3 * sectors;
		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
		return 0;
	}

	/*
	 * CONTROLLER_VERSION:
	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
	 * But we will just be conservative.
	 */
	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
	if (p->page_size == 512)
		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
	else
		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);

	if (ecc->bytes >= sas) {
		dev_err(&host->pdev->dev,
			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
			ecc->bytes, sas);
		return -EINVAL;
	}

	return 0;
}
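/*
 * Toggle #WP around erase/program when wp_on == 1 (see the module parameter
 * above): wait for the controller and flash to go ready, flip the pin, then
 * issue a STATUS read and poll until the flash reports the expected
 * protection state.
 */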
"on" : "off"); 1196 } 1197 } 1198 1199 /* Helper functions for reading and writing OOB registers */ 1200 static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs) 1201 { 1202 u16 offset0, offset10, reg_offs; 1203 1204 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE]; 1205 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE]; 1206 1207 if (offs >= ctrl->max_oob) 1208 return 0x77; 1209 1210 if (offs >= 16 && offset10) 1211 reg_offs = offset10 + ((offs - 0x10) & ~0x03); 1212 else 1213 reg_offs = offset0 + (offs & ~0x03); 1214 1215 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3)); 1216 } 1217 1218 static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs, 1219 u32 data) 1220 { 1221 u16 offset0, offset10, reg_offs; 1222 1223 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE]; 1224 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE]; 1225 1226 if (offs >= ctrl->max_oob) 1227 return; 1228 1229 if (offs >= 16 && offset10) 1230 reg_offs = offset10 + ((offs - 0x10) & ~0x03); 1231 else 1232 reg_offs = offset0 + (offs & ~0x03); 1233 1234 nand_writereg(ctrl, reg_offs, data); 1235 } 1236 1237 /* 1238 * read_oob_from_regs - read data from OOB registers 1239 * @ctrl: NAND controller 1240 * @i: sub-page sector index 1241 * @oob: buffer to read to 1242 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE) 1243 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal 1244 */ 1245 static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob, 1246 int sas, int sector_1k) 1247 { 1248 int tbytes = sas << sector_1k; 1249 int j; 1250 1251 /* Adjust OOB values for 1K sector size */ 1252 if (sector_1k && (i & 0x01)) 1253 tbytes = max(0, tbytes - (int)ctrl->max_oob); 1254 tbytes = min_t(int, tbytes, ctrl->max_oob); 1255 1256 for (j = 0; j < tbytes; j++) 1257 oob[j] = oob_reg_read(ctrl, j); 1258 return tbytes; 1259 } 1260 1261 /* 1262 * write_oob_to_regs - write data to OOB registers 1263 * @i: sub-page sector index 1264 * @oob: buffer to write from 1265 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE) 1266 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal 1267 */ 1268 static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, 1269 const u8 *oob, int sas, int sector_1k) 1270 { 1271 int tbytes = sas << sector_1k; 1272 int j; 1273 1274 /* Adjust OOB values for 1K sector size */ 1275 if (sector_1k && (i & 0x01)) 1276 tbytes = max(0, tbytes - (int)ctrl->max_oob); 1277 tbytes = min_t(int, tbytes, ctrl->max_oob); 1278 1279 for (j = 0; j < tbytes; j += 4) 1280 oob_reg_write(ctrl, j, 1281 (oob[j + 0] << 24) | 1282 (oob[j + 1] << 16) | 1283 (oob[j + 2] << 8) | 1284 (oob[j + 3] << 0)); 1285 return tbytes; 1286 } 1287 1288 static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data) 1289 { 1290 struct brcmnand_controller *ctrl = data; 1291 1292 /* Discard all NAND_CTLRDY interrupts during DMA */ 1293 if (ctrl->dma_pending) 1294 return IRQ_HANDLED; 1295 1296 complete(&ctrl->done); 1297 return IRQ_HANDLED; 1298 } 1299 1300 /* Handle SoC-specific interrupt hardware */ 1301 static irqreturn_t brcmnand_irq(int irq, void *data) 1302 { 1303 struct brcmnand_controller *ctrl = data; 1304 1305 if (ctrl->soc->ctlrdy_ack(ctrl->soc)) 1306 return brcmnand_ctlrdy_irq(irq, data); 1307 1308 return IRQ_NONE; 1309 } 1310 1311 static irqreturn_t brcmnand_dma_irq(int irq, void *data) 1312 { 1313 struct brcmnand_controller *ctrl = data; 1314 1315 complete(&ctrl->dma_done); 1316 1317 return 
/*
 * write_oob_to_regs - write data to OOB registers
 * @i: sub-page sector index
 * @oob: buffer to write from
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
			     const u8 *oob, int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j += 4)
		oob_reg_write(ctrl, j,
				(oob[j + 0] << 24) |
				(oob[j + 1] << 16) |
				(oob[j + 2] << 8) |
				(oob[j + 3] << 0));
	return tbytes;
}

static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	/* Discard all NAND_CTLRDY interrupts during DMA */
	if (ctrl->dma_pending)
		return IRQ_HANDLED;

	complete(&ctrl->done);
	return IRQ_HANDLED;
}

/* Handle SoC-specific interrupt hardware */
static irqreturn_t brcmnand_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
		return brcmnand_ctlrdy_irq(irq, data);

	return IRQ_NONE;
}

static irqreturn_t brcmnand_dma_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	complete(&ctrl->dma_done);

	return IRQ_HANDLED;
}

static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int ret;
	u64 cmd_addr;

	cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);

	BUG_ON(ctrl->cmd_pending != 0);
	ctrl->cmd_pending = cmd;

	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
	WARN_ON(ret);

	mb(); /* flush previous writes */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
			   cmd << brcmnand_cmd_shift(ctrl));
}

/***********************************************************************
 * NAND MTD API: read/program/erase
 ***********************************************************************/

static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
			      unsigned int ctrl)
{
	/* intentionally left blank */
}

static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool err = false;
	int sts;

	if (mtd->oops_panic_write) {
		/* switch to interrupt polling and PIO mode */
		disable_ctrl_irqs(ctrl);
		sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
					       NAND_CTRL_RDY, 0);
		err = (sts < 0) ? true : false;
	} else {
		unsigned long timeo = msecs_to_jiffies(
						NAND_POLL_STATUS_TIMEOUT_MS);
		/* wait for completion interrupt */
		sts = wait_for_completion_timeout(&ctrl->done, timeo);
		err = (sts <= 0) ? true : false;
	}

	return err;
}

static int brcmnand_waitfunc(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	bool err = false;

	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
	if (ctrl->cmd_pending)
		err = brcmstb_nand_wait_for_completion(chip);

	if (err) {
		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
					>> brcmnand_cmd_shift(ctrl);

		dev_err_ratelimited(ctrl->dev,
			"timeout waiting for command %#02x\n", cmd);
		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
			brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
	}
	ctrl->cmd_pending = 0;
	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
				 INTFC_FLASH_STATUS;
}

enum {
	LLOP_RE			= BIT(16),
	LLOP_WE			= BIT(17),
	LLOP_ALE		= BIT(18),
	LLOP_CLE		= BIT(19),
	LLOP_RETURN_IDLE	= BIT(31),

	LLOP_DATA_MASK		= GENMASK(15, 0),
};
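/*
 * Example encoding (illustrative): issuing a raw SET_FEATURES command byte
 * (0xef) via LL_OP_CMD yields LLOP_WE | LLOP_CLE | 0xef in the LL_OP
 * register; the final operation in a sequence additionally sets
 * LLOP_RETURN_IDLE.
 */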
static int brcmnand_low_level_op(struct brcmnand_host *host,
				 enum brcmnand_llop_type type, u32 data,
				 bool last_op)
{
	struct nand_chip *chip = &host->chip;
	struct brcmnand_controller *ctrl = host->ctrl;
	u32 tmp;

	tmp = data & LLOP_DATA_MASK;
	switch (type) {
	case LL_OP_CMD:
		tmp |= LLOP_WE | LLOP_CLE;
		break;
	case LL_OP_ADDR:
		/* WE | ALE */
		tmp |= LLOP_WE | LLOP_ALE;
		break;
	case LL_OP_WR:
		/* WE */
		tmp |= LLOP_WE;
		break;
	case LL_OP_RD:
		/* RE */
		tmp |= LLOP_RE;
		break;
	}
	if (last_op)
		/* RETURN_IDLE */
		tmp |= LLOP_RETURN_IDLE;

	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);

	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);

	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
	return brcmnand_waitfunc(chip);
}

static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
			     int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 addr = (u64)page_addr << chip->page_shift;
	int native_cmd = 0;

	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
	    command == NAND_CMD_RNDOUT)
		addr = (u64)column;
	/* Avoid propagating a negative, don't-care address */
	else if (page_addr < 0)
		addr = 0;

	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
		(unsigned long long)addr);

	host->last_cmd = command;
	host->last_byte = 0;
	host->last_addr = addr;

	switch (command) {
	case NAND_CMD_RESET:
		native_cmd = CMD_FLASH_RESET;
		break;
	case NAND_CMD_STATUS:
		native_cmd = CMD_STATUS_READ;
		break;
	case NAND_CMD_READID:
		native_cmd = CMD_DEVICE_ID_READ;
		break;
	case NAND_CMD_READOOB:
		native_cmd = CMD_SPARE_AREA_READ;
		break;
	case NAND_CMD_ERASE1:
		native_cmd = CMD_BLOCK_ERASE;
		brcmnand_wp(mtd, 0);
		break;
	case NAND_CMD_PARAM:
		native_cmd = CMD_PARAMETER_READ;
		break;
	case NAND_CMD_SET_FEATURES:
	case NAND_CMD_GET_FEATURES:
		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
		break;
	case NAND_CMD_RNDOUT:
		native_cmd = CMD_PARAMETER_CHANGE_COL;
		addr &= ~((u64)(FC_BYTES - 1));
		/*
		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
		 * NB: hwcfg.sector_size_1k may not be initialized yet
		 */
		if (brcmnand_get_sector_size_1k(host)) {
			host->hwcfg.sector_size_1k =
				brcmnand_get_sector_size_1k(host);
			brcmnand_set_sector_size_1k(host, 0);
		}
		break;
	}

	if (!native_cmd)
		return;

	brcmnand_set_cmd_addr(mtd, addr);
	brcmnand_send_cmd(host, native_cmd);
	brcmnand_waitfunc(chip);

	if (native_cmd == CMD_PARAMETER_READ ||
	    native_cmd == CMD_PARAMETER_CHANGE_COL) {
		/* Copy flash cache word-wise */
		u32 *flash_cache = (u32 *)ctrl->flash_cache;
		int i;

		brcmnand_soc_data_bus_prepare(ctrl->soc, true);

		/*
		 * Must cache the FLASH_CACHE now, since changes in
		 * SECTOR_SIZE_1K may invalidate it
		 */
		for (i = 0; i < FC_WORDS; i++)
			/*
			 * Flash cache is big endian for parameter pages, at
			 * least on STB SoCs
			 */
			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));

		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);

		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
		if (host->hwcfg.sector_size_1k)
			brcmnand_set_sector_size_1k(host,
						    host->hwcfg.sector_size_1k);
	}

	/* Re-enabling protection is necessary only after erase */
	if (command == NAND_CMD_ERASE1)
		brcmnand_wp(mtd, 1);
}
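/*
 * Byte reads are served from controller registers rather than the bus: ID
 * bytes are packed big-endian in the ID/ID_EXT registers, STATUS comes from
 * INTFC_STATUS, and PARAM/RNDOUT data comes from the flash cache contents
 * captured in brcmnand_cmdfunc() above.
 */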
static uint8_t brcmnand_read_byte(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	uint8_t ret = 0;
	int addr, offs;

	switch (host->last_cmd) {
	case NAND_CMD_READID:
		if (host->last_byte < 4)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
				(24 - (host->last_byte << 3));
		else if (host->last_byte < 8)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
				(56 - (host->last_byte << 3));
		break;

	case NAND_CMD_READOOB:
		ret = oob_reg_read(ctrl, host->last_byte);
		break;

	case NAND_CMD_STATUS:
		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
					INTFC_FLASH_STATUS;
		if (wp_on) /* hide WP status */
			ret |= NAND_STATUS_WP;
		break;

	case NAND_CMD_PARAM:
	case NAND_CMD_RNDOUT:
		addr = host->last_addr + host->last_byte;
		offs = addr & (FC_BYTES - 1);

		/* At FC_BYTES boundary, switch to next column */
		if (host->last_byte > 0 && offs == 0)
			nand_change_read_column_op(chip, addr, NULL, 0, false);

		ret = ctrl->flash_cache[offs];
		break;
	case NAND_CMD_GET_FEATURES:
		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
			ret = 0;
		} else {
			bool last = host->last_byte ==
				ONFI_SUBFEATURE_PARAM_LEN - 1;
			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
		}
	}

	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
	host->last_byte++;

	return ret;
}

static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++, buf++)
		*buf = brcmnand_read_byte(chip);
}

static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
			       int len)
{
	int i;
	struct brcmnand_host *host = nand_get_controller_data(chip);

	switch (host->last_cmd) {
	case NAND_CMD_SET_FEATURES:
		for (i = 0; i < len; i++)
			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
					      (i + 1) == len);
		break;
	default:
		BUG();
		break;
	}
}
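/*
 * The driver currently programs a single-entry list per transfer: see
 * brcmnand_dma_trans(), which passes begin = end = true and next_desc = 0.
 */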
/**
 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
 * following ahead of time:
 * - Is this descriptor the beginning or end of a linked list?
 * - What is the (DMA) address of the next descriptor in the linked list?
 */
static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
				  struct brcm_nand_dma_desc *desc, u64 addr,
				  dma_addr_t buf, u32 len, u8 dma_cmd,
				  bool begin, bool end,
				  dma_addr_t next_desc)
{
	memset(desc, 0, sizeof(*desc));
	/* Descriptors are written in native byte order (wordwise) */
	desc->next_desc = lower_32_bits(next_desc);
	desc->next_desc_ext = upper_32_bits(next_desc);
	desc->cmd_irq = (dma_cmd << 24) |
		(end ? (0x03 << 8) : 0) |	/* IRQ | STOP */
		(!!begin) | ((!!end) << 1);	/* head, tail */
#ifdef CONFIG_CPU_BIG_ENDIAN
	desc->cmd_irq |= 0x01 << 12;
#endif
	desc->dram_addr = lower_32_bits(buf);
	desc->dram_addr_ext = upper_32_bits(buf);
	desc->tfr_len = len;
	desc->total_len = len;
	desc->flash_addr = lower_32_bits(addr);
	desc->flash_addr_ext = upper_32_bits(addr);
	desc->cs = host->cs;
	desc->status_valid = 0x01;
	return 0;
}

/**
 * Kick the FLASH_DMA engine, with a given DMA descriptor
 */
static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned long timeo = msecs_to_jiffies(100);

	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);

	/* Start FLASH_DMA engine */
	ctrl->dma_pending = true;
	mb(); /* flush previous writes */
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */

	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
		dev_err(ctrl->dev,
			"timeout waiting for DMA; status %#x, error status %#x\n",
			flash_dma_readl(ctrl, FLASH_DMA_STATUS),
			flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
	}
	ctrl->dma_pending = false;
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
}
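/*
 * Map the buffer, describe the whole transfer with one descriptor, run the
 * engine, then translate the descriptor status bits into -EBADMSG
 * (uncorrectable) or -EUCLEAN (corrected bitflips).
 */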
static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
			      u32 len, u8 dma_cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	dma_addr_t buf_pa;
	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
	if (dma_mapping_error(ctrl->dev, buf_pa)) {
		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
		return -ENOMEM;
	}

	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
			       dma_cmd, true, true, 0);

	brcmnand_dma_run(host, ctrl->dma_pa);

	dma_unmap_single(ctrl->dev, buf_pa, len, dir);

	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
		return -EBADMSG;
	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
		return -EUCLEAN;

	return 0;
}

/*
 * Assumes proper CS is already set
 */
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
				u64 addr, unsigned int trans, u32 *buf,
				u8 *oob, u64 *err_addr)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	int i, j, ret = 0;

	brcmnand_clear_ecc_addr(ctrl);

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		brcmnand_set_cmd_addr(mtd, addr);
		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
		brcmnand_send_cmd(host, CMD_PAGE_READ);
		brcmnand_waitfunc(chip);

		if (likely(buf)) {
			brcmnand_soc_data_bus_prepare(ctrl->soc, false);

			for (j = 0; j < FC_WORDS; j++, buf++)
				*buf = brcmnand_read_fc(ctrl, j);

			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
		}

		if (oob)
			oob += read_oob_from_regs(ctrl, i, oob,
						  mtd->oobsize / trans,
						  host->hwcfg.sector_size_1k);

		if (!ret) {
			*err_addr = brcmnand_get_uncorrecc_addr(ctrl);

			if (*err_addr)
				ret = -EBADMSG;
		}

		if (!ret) {
			*err_addr = brcmnand_get_correcc_addr(ctrl);

			if (*err_addr)
				ret = -EUCLEAN;
		}
	}

	return ret;
}

/*
 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
 * error
 *
 * Because the HW ECC signals an ECC error if an erased page has even a single
 * bitflip, we must check each ECC error to see if it is actually an erased
 * page with bitflips, not a truly corrupted page.
 *
 * On a real error, return a negative error code (-EBADMSG for ECC error), and
 * buf will contain raw data.
 * Otherwise, fill buf with 0xffs and return the maximum number of
 * bitflips-per-ECC-sector to the caller.
 */
static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
					   struct nand_chip *chip, void *buf,
					   u64 addr)
{
	int i, sas;
	void *oob = chip->oob_poi;
	int bitflips = 0;
	int page = addr >> chip->page_shift;
	int ret;

	if (!buf)
		buf = nand_get_data_buf(chip);

	sas = mtd->oobsize / chip->ecc.steps;

	/* read without ecc for verification */
	ret = chip->ecc.read_page_raw(chip, buf, true, page);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
		ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
						  oob, sas, NULL, 0,
						  chip->ecc.strength);
		if (ret < 0)
			return ret;

		bitflips = max(bitflips, ret);
	}

	return bitflips;
}

static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 err_addr = 0;
	int err;
	bool retry = true;

	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);

try_dmaread:
	brcmnand_clear_ecc_addr(ctrl);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
					 CMD_PAGE_READ);
		if (err) {
			if (mtd_is_bitflip_or_eccerr(err))
				err_addr = addr;
			else
				return -EIO;
		}
	} else {
		if (oob)
			memset(oob, 0x99, mtd->oobsize);

		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
					   oob, &err_addr);
	}

	if (mtd_is_eccerr(err)) {
		/*
		 * On controller versions 7.0 and 7.1, a DMA read after a
		 * prior PIO read that reported an uncorrectable error can
		 * carry that error over; it is cleared only on a subsequent
		 * DMA read, so just retry once to clear a possible false
		 * error reported for the current DMA read.
		 */
		if ((ctrl->nand_version == 0x0700) ||
		    (ctrl->nand_version == 0x0701)) {
			if (retry) {
				retry = false;
				goto try_dmaread;
			}
		}

		/*
		 * Controller version 7.2 has a hw encoder to detect erased
		 * page bitflips; apply sw verification for older controllers
		 * only.
		 */
		if (ctrl->nand_version < 0x0702) {
			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
							      addr);
			/* erased page bitflips corrected */
			if (err >= 0)
				return err;
		}

		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.failed++;
		/* NAND layer expects zero on ECC errors */
		return 0;
	}

	if (mtd_is_bitflip(err)) {
		unsigned int corrected = brcmnand_count_corrected(ctrl);

		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.corrected += corrected;
		/* Always exceed the software-imposed threshold */
		return max(mtd->bitflip_threshold, corrected);
	}

	return 0;
}
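/*
 * The page-level hooks below operate in FC_BYTES (512B) flash-cache units:
 * a page is mtd->writesize >> FC_SHIFT transfers, e.g. four for a 2KiB page.
 */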
static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);

	return brcmnand_read(mtd, chip, host->last_addr,
			     mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}

static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
	int ret;

	nand_read_page_op(chip, page, 0, NULL, 0);

	brcmnand_set_ecc_enabled(host, 0);
	ret = brcmnand_read(mtd, chip, host->last_addr,
			    mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
	brcmnand_set_ecc_enabled(host, 1);
	return ret;
}

static int brcmnand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
			     mtd->writesize >> FC_SHIFT,
			     NULL, (u8 *)chip->oob_poi);
}

static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);

	brcmnand_set_ecc_enabled(host, 0);
	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
		      mtd->writesize >> FC_SHIFT,
		      NULL, (u8 *)chip->oob_poi);
	brcmnand_set_ecc_enabled(host, 1);
	return 0;
}

static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
			  u64 addr, const u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
	int status, ret = 0;

	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);

	if (unlikely((unsigned long)buf & 0x03)) {
		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
		buf = (u32 *)((unsigned long)buf & ~0x03);
	}

	brcmnand_wp(mtd, 0);

	for (i = 0; i < ctrl->max_oob; i += 4)
		oob_reg_write(ctrl, i, 0xffffffff);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
				       mtd->writesize, CMD_PROGRAM_PAGE))
			ret = -EIO;
		goto out;
	}

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		/* full address MUST be set before populating FC */
		brcmnand_set_cmd_addr(mtd, addr);

		if (buf) {
			brcmnand_soc_data_bus_prepare(ctrl->soc, false);

			for (j = 0; j < FC_WORDS; j++, buf++)
				brcmnand_write_fc(ctrl, j, *buf);

			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
		} else if (oob) {
			for (j = 0; j < FC_WORDS; j++)
				brcmnand_write_fc(ctrl, j, 0xffffffff);
		}

		if (oob) {
			oob += write_oob_to_regs(ctrl, i, oob,
						 mtd->oobsize / trans,
						 host->hwcfg.sector_size_1k);
		}

		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
		status = brcmnand_waitfunc(chip);

		if (status & NAND_STATUS_FAIL) {
			dev_info(ctrl->dev, "program failed at %llx\n",
				 (unsigned long long)addr);
			ret = -EIO;
			goto out;
		}
	}
out:
	brcmnand_wp(mtd, 1);
	return ret;
}
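/*
 * Note that brcmnand_write() above pre-fills the OOB registers with 0xff, so
 * pages programmed without explicit OOB data leave the spare area erased.
 */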
nand_to_mtd(chip); 2020 struct brcmnand_host *host = nand_get_controller_data(chip); 2021 void *oob = oob_required ? chip->oob_poi : NULL; 2022 2023 nand_prog_page_begin_op(chip, page, 0, NULL, 0); 2024 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2025 2026 return nand_prog_page_end_op(chip); 2027 } 2028 2029 static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, 2030 int oob_required, int page) 2031 { 2032 struct mtd_info *mtd = nand_to_mtd(chip); 2033 struct brcmnand_host *host = nand_get_controller_data(chip); 2034 void *oob = oob_required ? chip->oob_poi : NULL; 2035 2036 nand_prog_page_begin_op(chip, page, 0, NULL, 0); 2037 brcmnand_set_ecc_enabled(host, 0); 2038 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2039 brcmnand_set_ecc_enabled(host, 1); 2040 2041 return nand_prog_page_end_op(chip); 2042 } 2043 2044 static int brcmnand_write_oob(struct nand_chip *chip, int page) 2045 { 2046 return brcmnand_write(nand_to_mtd(chip), chip, 2047 (u64)page << chip->page_shift, NULL, 2048 chip->oob_poi); 2049 } 2050 2051 static int brcmnand_write_oob_raw(struct nand_chip *chip, int page) 2052 { 2053 struct mtd_info *mtd = nand_to_mtd(chip); 2054 struct brcmnand_host *host = nand_get_controller_data(chip); 2055 int ret; 2056 2057 brcmnand_set_ecc_enabled(host, 0); 2058 ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL, 2059 (u8 *)chip->oob_poi); 2060 brcmnand_set_ecc_enabled(host, 1); 2061 2062 return ret; 2063 } 2064 2065 /*********************************************************************** 2066 * Per-CS setup (1 NAND device) 2067 ***********************************************************************/ 2068 2069 static int brcmnand_set_cfg(struct brcmnand_host *host, 2070 struct brcmnand_cfg *cfg) 2071 { 2072 struct brcmnand_controller *ctrl = host->ctrl; 2073 struct nand_chip *chip = &host->chip; 2074 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2075 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, 2076 BRCMNAND_CS_CFG_EXT); 2077 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 2078 BRCMNAND_CS_ACC_CONTROL); 2079 u8 block_size = 0, page_size = 0, device_size = 0; 2080 u32 tmp; 2081 2082 if (ctrl->block_sizes) { 2083 int i, found; 2084 2085 for (i = 0, found = 0; ctrl->block_sizes[i]; i++) 2086 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) { 2087 block_size = i; 2088 found = 1; 2089 } 2090 if (!found) { 2091 dev_warn(ctrl->dev, "invalid block size %u\n", 2092 cfg->block_size); 2093 return -EINVAL; 2094 } 2095 } else { 2096 block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE); 2097 } 2098 2099 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size && 2100 cfg->block_size > ctrl->max_block_size)) { 2101 dev_warn(ctrl->dev, "invalid block size %u\n", 2102 cfg->block_size); 2103 block_size = 0; 2104 } 2105 2106 if (ctrl->page_sizes) { 2107 int i, found; 2108 2109 for (i = 0, found = 0; ctrl->page_sizes[i]; i++) 2110 if (ctrl->page_sizes[i] == cfg->page_size) { 2111 page_size = i; 2112 found = 1; 2113 } 2114 if (!found) { 2115 dev_warn(ctrl->dev, "invalid page size %u\n", 2116 cfg->page_size); 2117 return -EINVAL; 2118 } 2119 } else { 2120 page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE); 2121 } 2122 2123 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size && 2124 cfg->page_size > ctrl->max_page_size)) { 2125 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size); 2126 return -EINVAL; 2127 } 2128 2129 if 
(fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) { 2130 dev_warn(ctrl->dev, "invalid device size 0x%llx\n", 2131 (unsigned long long)cfg->device_size); 2132 return -EINVAL; 2133 } 2134 device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE); 2135 2136 tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) | 2137 (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) | 2138 (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) | 2139 (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) | 2140 (device_size << CFG_DEVICE_SIZE_SHIFT); 2141 if (cfg_offs == cfg_ext_offs) { 2142 tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) | 2143 (block_size << CFG_BLK_SIZE_SHIFT); 2144 nand_writereg(ctrl, cfg_offs, tmp); 2145 } else { 2146 nand_writereg(ctrl, cfg_offs, tmp); 2147 tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) | 2148 (block_size << CFG_EXT_BLK_SIZE_SHIFT); 2149 nand_writereg(ctrl, cfg_ext_offs, tmp); 2150 } 2151 2152 tmp = nand_readreg(ctrl, acc_control_offs); 2153 tmp &= ~brcmnand_ecc_level_mask(ctrl); 2154 tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; 2155 tmp &= ~brcmnand_spare_area_mask(ctrl); 2156 tmp |= cfg->spare_area_size; 2157 nand_writereg(ctrl, acc_control_offs, tmp); 2158 2159 brcmnand_set_sector_size_1k(host, cfg->sector_size_1k); 2160 2161 /* threshold = ceil(BCH-level * 0.75) */ 2162 brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4)); 2163 2164 return 0; 2165 } 2166 2167 static void brcmnand_print_cfg(struct brcmnand_host *host, 2168 char *buf, struct brcmnand_cfg *cfg) 2169 { 2170 buf += sprintf(buf, 2171 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", 2172 (unsigned long long)cfg->device_size >> 20, 2173 cfg->block_size >> 10, 2174 cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size, 2175 cfg->page_size >= 1024 ? "KiB" : "B", 2176 cfg->spare_area_size, cfg->device_width); 2177 2178 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ 2179 if (is_hamming_ecc(host->ctrl, cfg)) 2180 sprintf(buf, ", Hamming ECC"); 2181 else if (cfg->sector_size_1k) 2182 sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); 2183 else 2184 sprintf(buf, ", BCH-%u", cfg->ecc_level); 2185 } 2186 2187 /* 2188 * Minimum number of bytes to address a page. Calculated as: 2189 * roundup(log2(size / page-size) / 8) 2190 * 2191 * NB: the following does not "round up" for non-power-of-2 'size'; but this is 2192 * OK because many other things will break if 'size' is irregular... 2193 */ 2194 static inline int get_blk_adr_bytes(u64 size, u32 writesize) 2195 { 2196 return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3; 2197 } 2198 2199 static int brcmnand_setup_dev(struct brcmnand_host *host) 2200 { 2201 struct mtd_info *mtd = nand_to_mtd(&host->chip); 2202 struct nand_chip *chip = &host->chip; 2203 struct brcmnand_controller *ctrl = host->ctrl; 2204 struct brcmnand_cfg *cfg = &host->hwcfg; 2205 char msg[128]; 2206 u32 offs, tmp, oob_sector; 2207 int ret; 2208 2209 memset(cfg, 0, sizeof(*cfg)); 2210 2211 ret = of_property_read_u32(nand_get_flash_node(chip), 2212 "brcm,nand-oob-sector-size", 2213 &oob_sector); 2214 if (ret) { 2215 /* Use detected size */ 2216 cfg->spare_area_size = mtd->oobsize / 2217 (mtd->writesize >> FC_SHIFT); 2218 } else { 2219 cfg->spare_area_size = oob_sector; 2220 } 2221 if (cfg->spare_area_size > ctrl->max_oob) 2222 cfg->spare_area_size = ctrl->max_oob; 2223 /* 2224 * Set oobsize to be consistent with controller's spare_area_size, as 2225 * the rest is inaccessible. 
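	 * For example, a 2KiB-page chip advertising 64B of OOB has
	 * 2048 >> FC_SHIFT = 4 ECC steps, so spare_area_size becomes
	 * 64 / 4 = 16 and oobsize stays 4 * 16 = 64B.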
2226 	 */
2227 	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
2228 
2229 	cfg->device_size = mtd->size;
2230 	cfg->block_size = mtd->erasesize;
2231 	cfg->page_size = mtd->writesize;
2232 	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
2233 	cfg->col_adr_bytes = 2;
2234 	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
2235 
2236 	if (chip->ecc.mode != NAND_ECC_HW) {
2237 		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
2238 			chip->ecc.mode);
2239 		return -EINVAL;
2240 	}
2241 
2242 	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
2243 		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
2244 			/* Default to Hamming for 1-bit ECC, if unspecified */
2245 			chip->ecc.algo = NAND_ECC_HAMMING;
2246 		else
2247 			/* Otherwise, BCH */
2248 			chip->ecc.algo = NAND_ECC_BCH;
2249 	}
2250 
2251 	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
2252 						   chip->ecc.size != 512)) {
2253 		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
2254 			chip->ecc.strength, chip->ecc.size);
2255 		return -EINVAL;
2256 	}
2257 
2258 	if (chip->ecc.mode != NAND_ECC_NONE &&
2259 	    (!chip->ecc.size || !chip->ecc.strength)) {
2260 		if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
2261 			/* use detected ECC parameters */
2262 			chip->ecc.size = chip->base.eccreq.step_size;
2263 			chip->ecc.strength = chip->base.eccreq.strength;
2264 			dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
2265 				 chip->ecc.size, chip->ecc.strength);
2266 		}
2267 	}
2268 
2269 	switch (chip->ecc.size) {
2270 	case 512:
2271 		if (chip->ecc.algo == NAND_ECC_HAMMING)
2272 			cfg->ecc_level = 15;
2273 		else
2274 			cfg->ecc_level = chip->ecc.strength;
2275 		cfg->sector_size_1k = 0;
2276 		break;
2277 	case 1024:
2278 		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
2279 			dev_err(ctrl->dev, "1KB sectors not supported\n");
2280 			return -EINVAL;
2281 		}
2282 		if (chip->ecc.strength & 0x1) {
2283 			dev_err(ctrl->dev,
2284 				"odd ECC not supported with 1KB sectors\n");
2285 			return -EINVAL;
2286 		}
2287 
2288 		cfg->ecc_level = chip->ecc.strength >> 1;
2289 		cfg->sector_size_1k = 1;
2290 		break;
2291 	default:
2292 		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
2293 			chip->ecc.size);
2294 		return -EINVAL;
2295 	}
2296 
2297 	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
2298 	if (mtd->writesize > 512)
2299 		cfg->ful_adr_bytes += cfg->col_adr_bytes;
2300 	else
2301 		cfg->ful_adr_bytes += 1;
2302 
2303 	ret = brcmnand_set_cfg(host, cfg);
2304 	if (ret)
2305 		return ret;
2306 
2307 	brcmnand_set_ecc_enabled(host, 1);
2308 
2309 	brcmnand_print_cfg(host, msg, cfg);
2310 	dev_info(ctrl->dev, "detected %s\n", msg);
2311 
2312 	/* Configure ACC_CONTROL */
2313 	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
2314 	tmp = nand_readreg(ctrl, offs);
2315 	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
2316 	tmp &= ~ACC_CONTROL_RD_ERASED;
2317 
2318 	/* We need to turn on reads from erased pages protected by ECC */
2319 	if (ctrl->nand_version >= 0x0702)
2320 		tmp |= ACC_CONTROL_RD_ERASED;
2321 	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
2322 	if (ctrl->features & BRCMNAND_HAS_PREFETCH)
2323 		tmp &= ~ACC_CONTROL_PREFETCH;
2324 
2325 	nand_writereg(ctrl, offs, tmp);
2326 
2327 	return 0;
2328 }
2329 
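/*
 * Worked example (illustrative only): for a hypothetical 512MiB chip with
 * 4KiB pages, 224B of OOB and BCH-8 over 512B sectors, the setup above
 * yields:
 *
 *	spare_area_size = 224 / (4096 >> FC_SHIFT) = 28 bytes per step
 *	ecc_level = 8, sector_size_1k = 0
 *	device_size field = fls64(512MiB) - fls64(4MiB) = 30 - 23 = 7
 *	blk_adr_bytes = ALIGN(ilog2(512MiB) - ilog2(4KiB), 8) >> 3 = 3
 *	ful_adr_bytes = 3 + col_adr_bytes (2) = 5
 *	correction threshold = DIV_ROUND_UP(8 * 3, 4) = 6 bitflips
 */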
2330 static int brcmnand_attach_chip(struct nand_chip *chip)
2331 {
2332 	struct mtd_info *mtd = nand_to_mtd(chip);
2333 	struct brcmnand_host *host = nand_get_controller_data(chip);
2334 	int ret;
2335 
2336 	chip->options |= NAND_NO_SUBPAGE_WRITE;
2337 	/*
2338 	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't
2339 	 * DMA to/from, and have nand_base pass us a bounce buffer instead,
2340 	 * as needed.
2341 	 */
2342 	chip->options |= NAND_USE_BOUNCE_BUFFER;
2343 
2344 	if (chip->bbt_options & NAND_BBT_USE_FLASH)
2345 		chip->bbt_options |= NAND_BBT_NO_OOB;
2346 
2347 	if (brcmnand_setup_dev(host))
2348 		return -ENXIO;
2349 
2350 	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
2351 
2352 	/* only use our internal HW threshold */
2353 	mtd->bitflip_threshold = 1;
2354 
2355 	ret = brcmstb_choose_ecc_layout(host);
2356 
2357 	return ret;
2358 }
2359 
2360 static const struct nand_controller_ops brcmnand_controller_ops = {
2361 	.attach_chip = brcmnand_attach_chip,
2362 };
2363 
2364 static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
2365 {
2366 	struct brcmnand_controller *ctrl = host->ctrl;
2367 	struct platform_device *pdev = host->pdev;
2368 	struct mtd_info *mtd;
2369 	struct nand_chip *chip;
2370 	int ret;
2371 	u16 cfg_offs;
2372 
2373 	ret = of_property_read_u32(dn, "reg", &host->cs);
2374 	if (ret) {
2375 		dev_err(&pdev->dev, "can't get chip-select\n");
2376 		return -ENXIO;
2377 	}
2378 
2379 	mtd = nand_to_mtd(&host->chip);
2380 	chip = &host->chip;
2381 
2382 	nand_set_flash_node(chip, dn);
2383 	nand_set_controller_data(chip, host);
2384 	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
2385 				   host->cs);
2386 	if (!mtd->name)
2387 		return -ENOMEM;
2388 
2389 	mtd->owner = THIS_MODULE;
2390 	mtd->dev.parent = &pdev->dev;
2391 
2392 	chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
2393 	chip->legacy.cmdfunc = brcmnand_cmdfunc;
2394 	chip->legacy.waitfunc = brcmnand_waitfunc;
2395 	chip->legacy.read_byte = brcmnand_read_byte;
2396 	chip->legacy.read_buf = brcmnand_read_buf;
2397 	chip->legacy.write_buf = brcmnand_write_buf;
2398 
2399 	chip->ecc.mode = NAND_ECC_HW;
2400 	chip->ecc.read_page = brcmnand_read_page;
2401 	chip->ecc.write_page = brcmnand_write_page;
2402 	chip->ecc.read_page_raw = brcmnand_read_page_raw;
2403 	chip->ecc.write_page_raw = brcmnand_write_page_raw;
2404 	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
2405 	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
2406 	chip->ecc.read_oob = brcmnand_read_oob;
2407 	chip->ecc.write_oob = brcmnand_write_oob;
2408 
2409 	chip->controller = &ctrl->controller;
2410 
2411 	/*
2412 	 * The bootloader might have configured 16bit mode, but the NAND
2413 	 * READID command only works in 8bit mode. We force 8bit mode here
2414 	 * to ensure that the NAND READID command works.
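	 * This override is temporary: brcmnand_setup_dev() later reprograms
	 * the CFG register, including the bus width detected during
	 * nand_scan(), via brcmnand_set_cfg().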
2415 */ 2416 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2417 nand_writereg(ctrl, cfg_offs, 2418 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH); 2419 2420 ret = nand_scan(chip, 1); 2421 if (ret) 2422 return ret; 2423 2424 ret = mtd_device_register(mtd, NULL, 0); 2425 if (ret) 2426 nand_cleanup(chip); 2427 2428 return ret; 2429 } 2430 2431 static void brcmnand_save_restore_cs_config(struct brcmnand_host *host, 2432 int restore) 2433 { 2434 struct brcmnand_controller *ctrl = host->ctrl; 2435 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2436 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, 2437 BRCMNAND_CS_CFG_EXT); 2438 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 2439 BRCMNAND_CS_ACC_CONTROL); 2440 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1); 2441 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2); 2442 2443 if (restore) { 2444 nand_writereg(ctrl, cfg_offs, host->hwcfg.config); 2445 if (cfg_offs != cfg_ext_offs) 2446 nand_writereg(ctrl, cfg_ext_offs, 2447 host->hwcfg.config_ext); 2448 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control); 2449 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1); 2450 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2); 2451 } else { 2452 host->hwcfg.config = nand_readreg(ctrl, cfg_offs); 2453 if (cfg_offs != cfg_ext_offs) 2454 host->hwcfg.config_ext = 2455 nand_readreg(ctrl, cfg_ext_offs); 2456 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs); 2457 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs); 2458 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs); 2459 } 2460 } 2461 2462 static int brcmnand_suspend(struct device *dev) 2463 { 2464 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); 2465 struct brcmnand_host *host; 2466 2467 list_for_each_entry(host, &ctrl->host_list, node) 2468 brcmnand_save_restore_cs_config(host, 0); 2469 2470 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT); 2471 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR); 2472 ctrl->corr_stat_threshold = 2473 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD); 2474 2475 if (has_flash_dma(ctrl)) 2476 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE); 2477 2478 return 0; 2479 } 2480 2481 static int brcmnand_resume(struct device *dev) 2482 { 2483 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); 2484 struct brcmnand_host *host; 2485 2486 if (has_flash_dma(ctrl)) { 2487 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode); 2488 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); 2489 } 2490 2491 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select); 2492 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor); 2493 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD, 2494 ctrl->corr_stat_threshold); 2495 if (ctrl->soc) { 2496 /* Clear/re-enable interrupt */ 2497 ctrl->soc->ctlrdy_ack(ctrl->soc); 2498 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); 2499 } 2500 2501 list_for_each_entry(host, &ctrl->host_list, node) { 2502 struct nand_chip *chip = &host->chip; 2503 2504 brcmnand_save_restore_cs_config(host, 1); 2505 2506 /* Reset the chip, required by some chips after power-up */ 2507 nand_reset_op(chip); 2508 } 2509 2510 return 0; 2511 } 2512 2513 const struct dev_pm_ops brcmnand_pm_ops = { 2514 .suspend = brcmnand_suspend, 2515 .resume = brcmnand_resume, 2516 }; 2517 EXPORT_SYMBOL_GPL(brcmnand_pm_ops); 2518 2519 static const struct of_device_id brcmnand_of_match[] = { 2520 { 
.compatible = "brcm,brcmnand-v4.0" }, 2521 { .compatible = "brcm,brcmnand-v5.0" }, 2522 { .compatible = "brcm,brcmnand-v6.0" }, 2523 { .compatible = "brcm,brcmnand-v6.1" }, 2524 { .compatible = "brcm,brcmnand-v6.2" }, 2525 { .compatible = "brcm,brcmnand-v7.0" }, 2526 { .compatible = "brcm,brcmnand-v7.1" }, 2527 { .compatible = "brcm,brcmnand-v7.2" }, 2528 { .compatible = "brcm,brcmnand-v7.3" }, 2529 {}, 2530 }; 2531 MODULE_DEVICE_TABLE(of, brcmnand_of_match); 2532 2533 /*********************************************************************** 2534 * Platform driver setup (per controller) 2535 ***********************************************************************/ 2536 2537 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) 2538 { 2539 struct device *dev = &pdev->dev; 2540 struct device_node *dn = dev->of_node, *child; 2541 struct brcmnand_controller *ctrl; 2542 struct resource *res; 2543 int ret; 2544 2545 /* We only support device-tree instantiation */ 2546 if (!dn) 2547 return -ENODEV; 2548 2549 if (!of_match_node(brcmnand_of_match, dn)) 2550 return -ENODEV; 2551 2552 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); 2553 if (!ctrl) 2554 return -ENOMEM; 2555 2556 dev_set_drvdata(dev, ctrl); 2557 ctrl->dev = dev; 2558 2559 init_completion(&ctrl->done); 2560 init_completion(&ctrl->dma_done); 2561 nand_controller_init(&ctrl->controller); 2562 ctrl->controller.ops = &brcmnand_controller_ops; 2563 INIT_LIST_HEAD(&ctrl->host_list); 2564 2565 /* NAND register range */ 2566 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2567 ctrl->nand_base = devm_ioremap_resource(dev, res); 2568 if (IS_ERR(ctrl->nand_base)) 2569 return PTR_ERR(ctrl->nand_base); 2570 2571 /* Enable clock before using NAND registers */ 2572 ctrl->clk = devm_clk_get(dev, "nand"); 2573 if (!IS_ERR(ctrl->clk)) { 2574 ret = clk_prepare_enable(ctrl->clk); 2575 if (ret) 2576 return ret; 2577 } else { 2578 ret = PTR_ERR(ctrl->clk); 2579 if (ret == -EPROBE_DEFER) 2580 return ret; 2581 2582 ctrl->clk = NULL; 2583 } 2584 2585 /* Initialize NAND revision */ 2586 ret = brcmnand_revision_init(ctrl); 2587 if (ret) 2588 goto err; 2589 2590 /* 2591 * Most chips have this cache at a fixed offset within 'nand' block. 2592 * Some must specify this region separately. 
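	 * Those controllers provide a separate "nand-cache" memory
	 * resource, looked up by name just below.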
2593 */ 2594 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache"); 2595 if (res) { 2596 ctrl->nand_fc = devm_ioremap_resource(dev, res); 2597 if (IS_ERR(ctrl->nand_fc)) { 2598 ret = PTR_ERR(ctrl->nand_fc); 2599 goto err; 2600 } 2601 } else { 2602 ctrl->nand_fc = ctrl->nand_base + 2603 ctrl->reg_offsets[BRCMNAND_FC_BASE]; 2604 } 2605 2606 /* FLASH_DMA */ 2607 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma"); 2608 if (res) { 2609 ctrl->flash_dma_base = devm_ioremap_resource(dev, res); 2610 if (IS_ERR(ctrl->flash_dma_base)) { 2611 ret = PTR_ERR(ctrl->flash_dma_base); 2612 goto err; 2613 } 2614 2615 /* initialize the dma version */ 2616 brcmnand_flash_dma_revision_init(ctrl); 2617 2618 /* linked-list and stop on error */ 2619 flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK); 2620 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); 2621 2622 /* Allocate descriptor(s) */ 2623 ctrl->dma_desc = dmam_alloc_coherent(dev, 2624 sizeof(*ctrl->dma_desc), 2625 &ctrl->dma_pa, GFP_KERNEL); 2626 if (!ctrl->dma_desc) { 2627 ret = -ENOMEM; 2628 goto err; 2629 } 2630 2631 ctrl->dma_irq = platform_get_irq(pdev, 1); 2632 if ((int)ctrl->dma_irq < 0) { 2633 dev_err(dev, "missing FLASH_DMA IRQ\n"); 2634 ret = -ENODEV; 2635 goto err; 2636 } 2637 2638 ret = devm_request_irq(dev, ctrl->dma_irq, 2639 brcmnand_dma_irq, 0, DRV_NAME, 2640 ctrl); 2641 if (ret < 0) { 2642 dev_err(dev, "can't allocate IRQ %d: error %d\n", 2643 ctrl->dma_irq, ret); 2644 goto err; 2645 } 2646 2647 dev_info(dev, "enabling FLASH_DMA\n"); 2648 } 2649 2650 /* Disable automatic device ID config, direct addressing */ 2651 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, 2652 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0); 2653 /* Disable XOR addressing */ 2654 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0); 2655 2656 if (ctrl->features & BRCMNAND_HAS_WP) { 2657 /* Permanently disable write protection */ 2658 if (wp_on == 2) 2659 brcmnand_set_wp(ctrl, false); 2660 } else { 2661 wp_on = 0; 2662 } 2663 2664 /* IRQ */ 2665 ctrl->irq = platform_get_irq(pdev, 0); 2666 if ((int)ctrl->irq < 0) { 2667 dev_err(dev, "no IRQ defined\n"); 2668 ret = -ENODEV; 2669 goto err; 2670 } 2671 2672 /* 2673 * Some SoCs integrate this controller (e.g., its interrupt bits) in 2674 * interesting ways 2675 */ 2676 if (soc) { 2677 ctrl->soc = soc; 2678 2679 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0, 2680 DRV_NAME, ctrl); 2681 2682 /* Enable interrupt */ 2683 ctrl->soc->ctlrdy_ack(ctrl->soc); 2684 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); 2685 } else { 2686 /* Use standard interrupt infrastructure */ 2687 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0, 2688 DRV_NAME, ctrl); 2689 } 2690 if (ret < 0) { 2691 dev_err(dev, "can't allocate IRQ %d: error %d\n", 2692 ctrl->irq, ret); 2693 goto err; 2694 } 2695 2696 for_each_available_child_of_node(dn, child) { 2697 if (of_device_is_compatible(child, "brcm,nandcs")) { 2698 struct brcmnand_host *host; 2699 2700 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 2701 if (!host) { 2702 of_node_put(child); 2703 ret = -ENOMEM; 2704 goto err; 2705 } 2706 host->pdev = pdev; 2707 host->ctrl = ctrl; 2708 2709 ret = brcmnand_init_cs(host, child); 2710 if (ret) { 2711 devm_kfree(dev, host); 2712 continue; /* Try all chip-selects */ 2713 } 2714 2715 list_add_tail(&host->node, &ctrl->host_list); 2716 } 2717 } 2718 2719 /* No chip-selects could initialize properly */ 2720 if (list_empty(&ctrl->host_list)) { 2721 ret = -ENODEV; 2722 goto err; 2723 } 2724 2725 return 0; 
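	/*
	 * Common error path below. clk_disable_unprepare() accepts a NULL
	 * clk (ctrl->clk is NULL when no "nand" clock was found), so this
	 * is safe regardless of which branch above failed.
	 */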
2726 2727 err: 2728 clk_disable_unprepare(ctrl->clk); 2729 return ret; 2730 2731 } 2732 EXPORT_SYMBOL_GPL(brcmnand_probe); 2733 2734 int brcmnand_remove(struct platform_device *pdev) 2735 { 2736 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); 2737 struct brcmnand_host *host; 2738 2739 list_for_each_entry(host, &ctrl->host_list, node) 2740 nand_release(&host->chip); 2741 2742 clk_disable_unprepare(ctrl->clk); 2743 2744 dev_set_drvdata(&pdev->dev, NULL); 2745 2746 return 0; 2747 } 2748 EXPORT_SYMBOL_GPL(brcmnand_remove); 2749 2750 MODULE_LICENSE("GPL v2"); 2751 MODULE_AUTHOR("Kevin Cernekee"); 2752 MODULE_AUTHOR("Brian Norris"); 2753 MODULE_DESCRIPTION("NAND driver for Broadcom chips"); 2754 MODULE_ALIAS("platform:brcmnand"); 2755
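/*
 * Illustrative sketch (not part of this file): a minimal SoC glue driver
 * wires its ready-IRQ callbacks into struct brcmnand_soc and delegates the
 * rest to brcmnand_probe()/brcmnand_remove()/brcmnand_pm_ops. The foo_*
 * names and foo_nand_of_match table are hypothetical; see the real glue
 * drivers in this directory (e.g. bcm63138_nand.c, iproc_nand.c).
 *
 *	static int foo_nand_probe(struct platform_device *pdev)
 *	{
 *		struct brcmnand_soc *soc;
 *
 *		soc = devm_kzalloc(&pdev->dev, sizeof(*soc), GFP_KERNEL);
 *		if (!soc)
 *			return -ENOMEM;
 *
 *		soc->ctlrdy_ack = foo_ctlrdy_ack;
 *		soc->ctlrdy_set_enabled = foo_ctlrdy_set_enabled;
 *
 *		return brcmnand_probe(pdev, soc);
 *	}
 *
 *	static struct platform_driver foo_nand_driver = {
 *		.probe			= foo_nand_probe,
 *		.remove			= brcmnand_remove,
 *		.driver = {
 *			.name		= "foo_nand",
 *			.of_match_table	= foo_nand_of_match,
 *			.pm		= &brcmnand_pm_ops,
 *		},
 *	};
 *	module_platform_driver(foo_nand_driver);
 */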