1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright © 2010-2015 Broadcom Corporation 4 */ 5 6 #include <linux/clk.h> 7 #include <linux/version.h> 8 #include <linux/module.h> 9 #include <linux/init.h> 10 #include <linux/delay.h> 11 #include <linux/device.h> 12 #include <linux/platform_device.h> 13 #include <linux/err.h> 14 #include <linux/completion.h> 15 #include <linux/interrupt.h> 16 #include <linux/spinlock.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/ioport.h> 19 #include <linux/bug.h> 20 #include <linux/kernel.h> 21 #include <linux/bitops.h> 22 #include <linux/mm.h> 23 #include <linux/mtd/mtd.h> 24 #include <linux/mtd/rawnand.h> 25 #include <linux/mtd/partitions.h> 26 #include <linux/of.h> 27 #include <linux/of_platform.h> 28 #include <linux/slab.h> 29 #include <linux/list.h> 30 #include <linux/log2.h> 31 32 #include "brcmnand.h" 33 34 /* 35 * This flag controls if WP stays on between erase/write commands to mitigate 36 * flash corruption due to power glitches. Values: 37 * 0: NAND_WP is not used or not available 38 * 1: NAND_WP is set by default, cleared for erase/write operations 39 * 2: NAND_WP is always cleared 40 */ 41 static int wp_on = 1; 42 module_param(wp_on, int, 0444); 43 44 /*********************************************************************** 45 * Definitions 46 ***********************************************************************/ 47 48 #define DRV_NAME "brcmnand" 49 50 #define CMD_NULL 0x00 51 #define CMD_PAGE_READ 0x01 52 #define CMD_SPARE_AREA_READ 0x02 53 #define CMD_STATUS_READ 0x03 54 #define CMD_PROGRAM_PAGE 0x04 55 #define CMD_PROGRAM_SPARE_AREA 0x05 56 #define CMD_COPY_BACK 0x06 57 #define CMD_DEVICE_ID_READ 0x07 58 #define CMD_BLOCK_ERASE 0x08 59 #define CMD_FLASH_RESET 0x09 60 #define CMD_BLOCKS_LOCK 0x0a 61 #define CMD_BLOCKS_LOCK_DOWN 0x0b 62 #define CMD_BLOCKS_UNLOCK 0x0c 63 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d 64 #define CMD_PARAMETER_READ 0x0e 65 #define CMD_PARAMETER_CHANGE_COL 0x0f 66 #define CMD_LOW_LEVEL_OP 0x10 67 68 struct brcm_nand_dma_desc { 69 u32 next_desc; 70 u32 next_desc_ext; 71 u32 cmd_irq; 72 u32 dram_addr; 73 u32 dram_addr_ext; 74 u32 tfr_len; 75 u32 total_len; 76 u32 flash_addr; 77 u32 flash_addr_ext; 78 u32 cs; 79 u32 pad2[5]; 80 u32 status_valid; 81 } __packed; 82 83 /* Bitfields for brcm_nand_dma_desc::status_valid */ 84 #define FLASH_DMA_ECC_ERROR (1 << 8) 85 #define FLASH_DMA_CORR_ERROR (1 << 9) 86 87 /* Bitfields for DMA_MODE */ 88 #define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop in Uncorr ECC error */ 89 #define FLASH_DMA_MODE_MODE BIT(0) /* link list */ 90 #define FLASH_DMA_MODE_MASK (FLASH_DMA_MODE_STOP_ON_ERROR | \ 91 FLASH_DMA_MODE_MODE) 92 93 /* 512B flash cache in the NAND controller HW */ 94 #define FC_SHIFT 9U 95 #define FC_BYTES 512U 96 #define FC_WORDS (FC_BYTES >> 2) 97 98 #define BRCMNAND_MIN_PAGESIZE 512 99 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024) 100 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) 101 102 #define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) 103 #define NAND_POLL_STATUS_TIMEOUT_MS 100 104 105 /* flash_dma registers */ 106 enum flash_dma_reg { 107 FLASH_DMA_REVISION = 0, 108 FLASH_DMA_FIRST_DESC, 109 FLASH_DMA_FIRST_DESC_EXT, 110 FLASH_DMA_CTRL, 111 FLASH_DMA_MODE, 112 FLASH_DMA_STATUS, 113 FLASH_DMA_INTERRUPT_DESC, 114 FLASH_DMA_INTERRUPT_DESC_EXT, 115 FLASH_DMA_ERROR_STATUS, 116 FLASH_DMA_CURRENT_DESC, 117 FLASH_DMA_CURRENT_DESC_EXT, 118 }; 119 120 /* flash_dma registers v0*/ 121 static const u16 flash_dma_regs_v0[] = { 122 [FLASH_DMA_REVISION] = 0x00, 123 [FLASH_DMA_FIRST_DESC] = 0x04, 124 [FLASH_DMA_CTRL] = 0x08, 125 [FLASH_DMA_MODE] = 0x0c, 126 [FLASH_DMA_STATUS] = 0x10, 127 [FLASH_DMA_INTERRUPT_DESC] = 0x14, 128 [FLASH_DMA_ERROR_STATUS] = 0x18, 129 [FLASH_DMA_CURRENT_DESC] = 0x1c, 130 }; 131 132 /* flash_dma registers v1*/ 133 static const u16 flash_dma_regs_v1[] = { 134 [FLASH_DMA_REVISION] = 0x00, 135 [FLASH_DMA_FIRST_DESC] = 0x04, 136 [FLASH_DMA_FIRST_DESC_EXT] = 0x08, 137 [FLASH_DMA_CTRL] = 0x0c, 138 [FLASH_DMA_MODE] = 0x10, 139 [FLASH_DMA_STATUS] = 0x14, 140 [FLASH_DMA_INTERRUPT_DESC] = 0x18, 141 [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c, 142 [FLASH_DMA_ERROR_STATUS] = 0x20, 143 [FLASH_DMA_CURRENT_DESC] = 0x24, 144 [FLASH_DMA_CURRENT_DESC_EXT] = 0x28, 145 }; 146 147 /* flash_dma registers v4 */ 148 static const u16 flash_dma_regs_v4[] = { 149 [FLASH_DMA_REVISION] = 0x00, 150 [FLASH_DMA_FIRST_DESC] = 0x08, 151 [FLASH_DMA_FIRST_DESC_EXT] = 0x0c, 152 [FLASH_DMA_CTRL] = 0x10, 153 [FLASH_DMA_MODE] = 0x14, 154 [FLASH_DMA_STATUS] = 0x18, 155 [FLASH_DMA_INTERRUPT_DESC] = 0x20, 156 [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24, 157 [FLASH_DMA_ERROR_STATUS] = 0x28, 158 [FLASH_DMA_CURRENT_DESC] = 0x30, 159 [FLASH_DMA_CURRENT_DESC_EXT] = 0x34, 160 }; 161 162 /* Controller feature flags */ 163 enum { 164 BRCMNAND_HAS_1K_SECTORS = BIT(0), 165 BRCMNAND_HAS_PREFETCH = BIT(1), 166 BRCMNAND_HAS_CACHE_MODE = BIT(2), 167 BRCMNAND_HAS_WP = BIT(3), 168 }; 169 170 struct brcmnand_controller { 171 struct device *dev; 172 struct nand_controller controller; 173 void __iomem *nand_base; 174 void __iomem *nand_fc; /* flash cache */ 175 void __iomem *flash_dma_base; 176 unsigned int irq; 177 unsigned int dma_irq; 178 int nand_version; 179 180 /* Some SoCs provide custom interrupt status register(s) */ 181 struct brcmnand_soc *soc; 182 183 /* Some SoCs have a gateable clock for the controller */ 184 struct clk *clk; 185 186 int cmd_pending; 187 bool dma_pending; 188 struct completion done; 189 struct completion dma_done; 190 191 /* List of NAND hosts (one for each chip-select) */ 192 struct list_head host_list; 193 194 /* flash_dma reg */ 195 const u16 *flash_dma_offsets; 196 struct brcm_nand_dma_desc *dma_desc; 197 dma_addr_t dma_pa; 198 199 /* in-memory cache of the FLASH_CACHE, used only for some commands */ 200 u8 flash_cache[FC_BYTES]; 201 202 /* Controller revision details */ 203 const u16 *reg_offsets; 204 unsigned int reg_spacing; /* between CS1, CS2, ... regs */ 205 const u8 *cs_offsets; /* within each chip-select */ 206 const u8 *cs0_offsets; /* within CS0, if different */ 207 unsigned int max_block_size; 208 const unsigned int *block_sizes; 209 unsigned int max_page_size; 210 const unsigned int *page_sizes; 211 unsigned int max_oob; 212 u32 features; 213 214 /* for low-power standby/resume only */ 215 u32 nand_cs_nand_select; 216 u32 nand_cs_nand_xor; 217 u32 corr_stat_threshold; 218 u32 flash_dma_mode; 219 bool pio_poll_mode; 220 }; 221 222 struct brcmnand_cfg { 223 u64 device_size; 224 unsigned int block_size; 225 unsigned int page_size; 226 unsigned int spare_area_size; 227 unsigned int device_width; 228 unsigned int col_adr_bytes; 229 unsigned int blk_adr_bytes; 230 unsigned int ful_adr_bytes; 231 unsigned int sector_size_1k; 232 unsigned int ecc_level; 233 /* use for low-power standby/resume only */ 234 u32 acc_control; 235 u32 config; 236 u32 config_ext; 237 u32 timing_1; 238 u32 timing_2; 239 }; 240 241 struct brcmnand_host { 242 struct list_head node; 243 244 struct nand_chip chip; 245 struct platform_device *pdev; 246 int cs; 247 248 unsigned int last_cmd; 249 unsigned int last_byte; 250 u64 last_addr; 251 struct brcmnand_cfg hwcfg; 252 struct brcmnand_controller *ctrl; 253 }; 254 255 enum brcmnand_reg { 256 BRCMNAND_CMD_START = 0, 257 BRCMNAND_CMD_EXT_ADDRESS, 258 BRCMNAND_CMD_ADDRESS, 259 BRCMNAND_INTFC_STATUS, 260 BRCMNAND_CS_SELECT, 261 BRCMNAND_CS_XOR, 262 BRCMNAND_LL_OP, 263 BRCMNAND_CS0_BASE, 264 BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */ 265 BRCMNAND_CORR_THRESHOLD, 266 BRCMNAND_CORR_THRESHOLD_EXT, 267 BRCMNAND_UNCORR_COUNT, 268 BRCMNAND_CORR_COUNT, 269 BRCMNAND_CORR_EXT_ADDR, 270 BRCMNAND_CORR_ADDR, 271 BRCMNAND_UNCORR_EXT_ADDR, 272 BRCMNAND_UNCORR_ADDR, 273 BRCMNAND_SEMAPHORE, 274 BRCMNAND_ID, 275 BRCMNAND_ID_EXT, 276 BRCMNAND_LL_RDATA, 277 BRCMNAND_OOB_READ_BASE, 278 BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */ 279 BRCMNAND_OOB_WRITE_BASE, 280 BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */ 281 BRCMNAND_FC_BASE, 282 }; 283 284 /* BRCMNAND v4.0 */ 285 static const u16 brcmnand_regs_v40[] = { 286 [BRCMNAND_CMD_START] = 0x04, 287 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 288 [BRCMNAND_CMD_ADDRESS] = 0x0c, 289 [BRCMNAND_INTFC_STATUS] = 0x6c, 290 [BRCMNAND_CS_SELECT] = 0x14, 291 [BRCMNAND_CS_XOR] = 0x18, 292 [BRCMNAND_LL_OP] = 0x178, 293 [BRCMNAND_CS0_BASE] = 0x40, 294 [BRCMNAND_CS1_BASE] = 0xd0, 295 [BRCMNAND_CORR_THRESHOLD] = 0x84, 296 [BRCMNAND_CORR_THRESHOLD_EXT] = 0, 297 [BRCMNAND_UNCORR_COUNT] = 0, 298 [BRCMNAND_CORR_COUNT] = 0, 299 [BRCMNAND_CORR_EXT_ADDR] = 0x70, 300 [BRCMNAND_CORR_ADDR] = 0x74, 301 [BRCMNAND_UNCORR_EXT_ADDR] = 0x78, 302 [BRCMNAND_UNCORR_ADDR] = 0x7c, 303 [BRCMNAND_SEMAPHORE] = 0x58, 304 [BRCMNAND_ID] = 0x60, 305 [BRCMNAND_ID_EXT] = 0x64, 306 [BRCMNAND_LL_RDATA] = 0x17c, 307 [BRCMNAND_OOB_READ_BASE] = 0x20, 308 [BRCMNAND_OOB_READ_10_BASE] = 0x130, 309 [BRCMNAND_OOB_WRITE_BASE] = 0x30, 310 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 311 [BRCMNAND_FC_BASE] = 0x200, 312 }; 313 314 /* BRCMNAND v5.0 */ 315 static const u16 brcmnand_regs_v50[] = { 316 [BRCMNAND_CMD_START] = 0x04, 317 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 318 [BRCMNAND_CMD_ADDRESS] = 0x0c, 319 [BRCMNAND_INTFC_STATUS] = 0x6c, 320 [BRCMNAND_CS_SELECT] = 0x14, 321 [BRCMNAND_CS_XOR] = 0x18, 322 [BRCMNAND_LL_OP] = 0x178, 323 [BRCMNAND_CS0_BASE] = 0x40, 324 [BRCMNAND_CS1_BASE] = 0xd0, 325 [BRCMNAND_CORR_THRESHOLD] = 0x84, 326 [BRCMNAND_CORR_THRESHOLD_EXT] = 0, 327 [BRCMNAND_UNCORR_COUNT] = 0, 328 [BRCMNAND_CORR_COUNT] = 0, 329 [BRCMNAND_CORR_EXT_ADDR] = 0x70, 330 [BRCMNAND_CORR_ADDR] = 0x74, 331 [BRCMNAND_UNCORR_EXT_ADDR] = 0x78, 332 [BRCMNAND_UNCORR_ADDR] = 0x7c, 333 [BRCMNAND_SEMAPHORE] = 0x58, 334 [BRCMNAND_ID] = 0x60, 335 [BRCMNAND_ID_EXT] = 0x64, 336 [BRCMNAND_LL_RDATA] = 0x17c, 337 [BRCMNAND_OOB_READ_BASE] = 0x20, 338 [BRCMNAND_OOB_READ_10_BASE] = 0x130, 339 [BRCMNAND_OOB_WRITE_BASE] = 0x30, 340 [BRCMNAND_OOB_WRITE_10_BASE] = 0x140, 341 [BRCMNAND_FC_BASE] = 0x200, 342 }; 343 344 /* BRCMNAND v6.0 - v7.1 */ 345 static const u16 brcmnand_regs_v60[] = { 346 [BRCMNAND_CMD_START] = 0x04, 347 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 348 [BRCMNAND_CMD_ADDRESS] = 0x0c, 349 [BRCMNAND_INTFC_STATUS] = 0x14, 350 [BRCMNAND_CS_SELECT] = 0x18, 351 [BRCMNAND_CS_XOR] = 0x1c, 352 [BRCMNAND_LL_OP] = 0x20, 353 [BRCMNAND_CS0_BASE] = 0x50, 354 [BRCMNAND_CS1_BASE] = 0, 355 [BRCMNAND_CORR_THRESHOLD] = 0xc0, 356 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4, 357 [BRCMNAND_UNCORR_COUNT] = 0xfc, 358 [BRCMNAND_CORR_COUNT] = 0x100, 359 [BRCMNAND_CORR_EXT_ADDR] = 0x10c, 360 [BRCMNAND_CORR_ADDR] = 0x110, 361 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, 362 [BRCMNAND_UNCORR_ADDR] = 0x118, 363 [BRCMNAND_SEMAPHORE] = 0x150, 364 [BRCMNAND_ID] = 0x194, 365 [BRCMNAND_ID_EXT] = 0x198, 366 [BRCMNAND_LL_RDATA] = 0x19c, 367 [BRCMNAND_OOB_READ_BASE] = 0x200, 368 [BRCMNAND_OOB_READ_10_BASE] = 0, 369 [BRCMNAND_OOB_WRITE_BASE] = 0x280, 370 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 371 [BRCMNAND_FC_BASE] = 0x400, 372 }; 373 374 /* BRCMNAND v7.1 */ 375 static const u16 brcmnand_regs_v71[] = { 376 [BRCMNAND_CMD_START] = 0x04, 377 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 378 [BRCMNAND_CMD_ADDRESS] = 0x0c, 379 [BRCMNAND_INTFC_STATUS] = 0x14, 380 [BRCMNAND_CS_SELECT] = 0x18, 381 [BRCMNAND_CS_XOR] = 0x1c, 382 [BRCMNAND_LL_OP] = 0x20, 383 [BRCMNAND_CS0_BASE] = 0x50, 384 [BRCMNAND_CS1_BASE] = 0, 385 [BRCMNAND_CORR_THRESHOLD] = 0xdc, 386 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, 387 [BRCMNAND_UNCORR_COUNT] = 0xfc, 388 [BRCMNAND_CORR_COUNT] = 0x100, 389 [BRCMNAND_CORR_EXT_ADDR] = 0x10c, 390 [BRCMNAND_CORR_ADDR] = 0x110, 391 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, 392 [BRCMNAND_UNCORR_ADDR] = 0x118, 393 [BRCMNAND_SEMAPHORE] = 0x150, 394 [BRCMNAND_ID] = 0x194, 395 [BRCMNAND_ID_EXT] = 0x198, 396 [BRCMNAND_LL_RDATA] = 0x19c, 397 [BRCMNAND_OOB_READ_BASE] = 0x200, 398 [BRCMNAND_OOB_READ_10_BASE] = 0, 399 [BRCMNAND_OOB_WRITE_BASE] = 0x280, 400 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 401 [BRCMNAND_FC_BASE] = 0x400, 402 }; 403 404 /* BRCMNAND v7.2 */ 405 static const u16 brcmnand_regs_v72[] = { 406 [BRCMNAND_CMD_START] = 0x04, 407 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 408 [BRCMNAND_CMD_ADDRESS] = 0x0c, 409 [BRCMNAND_INTFC_STATUS] = 0x14, 410 [BRCMNAND_CS_SELECT] = 0x18, 411 [BRCMNAND_CS_XOR] = 0x1c, 412 [BRCMNAND_LL_OP] = 0x20, 413 [BRCMNAND_CS0_BASE] = 0x50, 414 [BRCMNAND_CS1_BASE] = 0, 415 [BRCMNAND_CORR_THRESHOLD] = 0xdc, 416 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, 417 [BRCMNAND_UNCORR_COUNT] = 0xfc, 418 [BRCMNAND_CORR_COUNT] = 0x100, 419 [BRCMNAND_CORR_EXT_ADDR] = 0x10c, 420 [BRCMNAND_CORR_ADDR] = 0x110, 421 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, 422 [BRCMNAND_UNCORR_ADDR] = 0x118, 423 [BRCMNAND_SEMAPHORE] = 0x150, 424 [BRCMNAND_ID] = 0x194, 425 [BRCMNAND_ID_EXT] = 0x198, 426 [BRCMNAND_LL_RDATA] = 0x19c, 427 [BRCMNAND_OOB_READ_BASE] = 0x200, 428 [BRCMNAND_OOB_READ_10_BASE] = 0, 429 [BRCMNAND_OOB_WRITE_BASE] = 0x400, 430 [BRCMNAND_OOB_WRITE_10_BASE] = 0, 431 [BRCMNAND_FC_BASE] = 0x600, 432 }; 433 434 enum brcmnand_cs_reg { 435 BRCMNAND_CS_CFG_EXT = 0, 436 BRCMNAND_CS_CFG, 437 BRCMNAND_CS_ACC_CONTROL, 438 BRCMNAND_CS_TIMING1, 439 BRCMNAND_CS_TIMING2, 440 }; 441 442 /* Per chip-select offsets for v7.1 */ 443 static const u8 brcmnand_cs_offsets_v71[] = { 444 [BRCMNAND_CS_ACC_CONTROL] = 0x00, 445 [BRCMNAND_CS_CFG_EXT] = 0x04, 446 [BRCMNAND_CS_CFG] = 0x08, 447 [BRCMNAND_CS_TIMING1] = 0x0c, 448 [BRCMNAND_CS_TIMING2] = 0x10, 449 }; 450 451 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */ 452 static const u8 brcmnand_cs_offsets[] = { 453 [BRCMNAND_CS_ACC_CONTROL] = 0x00, 454 [BRCMNAND_CS_CFG_EXT] = 0x04, 455 [BRCMNAND_CS_CFG] = 0x04, 456 [BRCMNAND_CS_TIMING1] = 0x08, 457 [BRCMNAND_CS_TIMING2] = 0x0c, 458 }; 459 460 /* Per chip-select offset for <= v5.0 on CS0 only */ 461 static const u8 brcmnand_cs_offsets_cs0[] = { 462 [BRCMNAND_CS_ACC_CONTROL] = 0x00, 463 [BRCMNAND_CS_CFG_EXT] = 0x08, 464 [BRCMNAND_CS_CFG] = 0x08, 465 [BRCMNAND_CS_TIMING1] = 0x10, 466 [BRCMNAND_CS_TIMING2] = 0x14, 467 }; 468 469 /* 470 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had 471 * one config register, but once the bitfields overflowed, newer controllers 472 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around. 473 */ 474 enum { 475 CFG_BLK_ADR_BYTES_SHIFT = 8, 476 CFG_COL_ADR_BYTES_SHIFT = 12, 477 CFG_FUL_ADR_BYTES_SHIFT = 16, 478 CFG_BUS_WIDTH_SHIFT = 23, 479 CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT), 480 CFG_DEVICE_SIZE_SHIFT = 24, 481 482 /* Only for pre-v7.1 (with no CFG_EXT register) */ 483 CFG_PAGE_SIZE_SHIFT = 20, 484 CFG_BLK_SIZE_SHIFT = 28, 485 486 /* Only for v7.1+ (with CFG_EXT register) */ 487 CFG_EXT_PAGE_SIZE_SHIFT = 0, 488 CFG_EXT_BLK_SIZE_SHIFT = 4, 489 }; 490 491 /* BRCMNAND_INTFC_STATUS */ 492 enum { 493 INTFC_FLASH_STATUS = GENMASK(7, 0), 494 495 INTFC_ERASED = BIT(27), 496 INTFC_OOB_VALID = BIT(28), 497 INTFC_CACHE_VALID = BIT(29), 498 INTFC_FLASH_READY = BIT(30), 499 INTFC_CTLR_READY = BIT(31), 500 }; 501 502 static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs) 503 { 504 return brcmnand_readl(ctrl->nand_base + offs); 505 } 506 507 static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs, 508 u32 val) 509 { 510 brcmnand_writel(val, ctrl->nand_base + offs); 511 } 512 513 static int brcmnand_revision_init(struct brcmnand_controller *ctrl) 514 { 515 static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 }; 516 static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 }; 517 static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 }; 518 519 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff; 520 521 /* Only support v4.0+? */ 522 if (ctrl->nand_version < 0x0400) { 523 dev_err(ctrl->dev, "version %#x not supported\n", 524 ctrl->nand_version); 525 return -ENODEV; 526 } 527 528 /* Register offsets */ 529 if (ctrl->nand_version >= 0x0702) 530 ctrl->reg_offsets = brcmnand_regs_v72; 531 else if (ctrl->nand_version == 0x0701) 532 ctrl->reg_offsets = brcmnand_regs_v71; 533 else if (ctrl->nand_version >= 0x0600) 534 ctrl->reg_offsets = brcmnand_regs_v60; 535 else if (ctrl->nand_version >= 0x0500) 536 ctrl->reg_offsets = brcmnand_regs_v50; 537 else if (ctrl->nand_version >= 0x0400) 538 ctrl->reg_offsets = brcmnand_regs_v40; 539 540 /* Chip-select stride */ 541 if (ctrl->nand_version >= 0x0701) 542 ctrl->reg_spacing = 0x14; 543 else 544 ctrl->reg_spacing = 0x10; 545 546 /* Per chip-select registers */ 547 if (ctrl->nand_version >= 0x0701) { 548 ctrl->cs_offsets = brcmnand_cs_offsets_v71; 549 } else { 550 ctrl->cs_offsets = brcmnand_cs_offsets; 551 552 /* v5.0 and earlier has a different CS0 offset layout */ 553 if (ctrl->nand_version <= 0x0500) 554 ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; 555 } 556 557 /* Page / block sizes */ 558 if (ctrl->nand_version >= 0x0701) { 559 /* >= v7.1 use nice power-of-2 values! */ 560 ctrl->max_page_size = 16 * 1024; 561 ctrl->max_block_size = 2 * 1024 * 1024; 562 } else { 563 ctrl->page_sizes = page_sizes; 564 if (ctrl->nand_version >= 0x0600) 565 ctrl->block_sizes = block_sizes_v6; 566 else 567 ctrl->block_sizes = block_sizes_v4; 568 569 if (ctrl->nand_version < 0x0400) { 570 ctrl->max_page_size = 4096; 571 ctrl->max_block_size = 512 * 1024; 572 } 573 } 574 575 /* Maximum spare area sector size (per 512B) */ 576 if (ctrl->nand_version == 0x0702) 577 ctrl->max_oob = 128; 578 else if (ctrl->nand_version >= 0x0600) 579 ctrl->max_oob = 64; 580 else if (ctrl->nand_version >= 0x0500) 581 ctrl->max_oob = 32; 582 else 583 ctrl->max_oob = 16; 584 585 /* v6.0 and newer (except v6.1) have prefetch support */ 586 if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601) 587 ctrl->features |= BRCMNAND_HAS_PREFETCH; 588 589 /* 590 * v6.x has cache mode, but it's implemented differently. Ignore it for 591 * now. 592 */ 593 if (ctrl->nand_version >= 0x0700) 594 ctrl->features |= BRCMNAND_HAS_CACHE_MODE; 595 596 if (ctrl->nand_version >= 0x0500) 597 ctrl->features |= BRCMNAND_HAS_1K_SECTORS; 598 599 if (ctrl->nand_version >= 0x0700) 600 ctrl->features |= BRCMNAND_HAS_WP; 601 else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp")) 602 ctrl->features |= BRCMNAND_HAS_WP; 603 604 return 0; 605 } 606 607 static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl) 608 { 609 /* flash_dma register offsets */ 610 if (ctrl->nand_version >= 0x0703) 611 ctrl->flash_dma_offsets = flash_dma_regs_v4; 612 else if (ctrl->nand_version == 0x0602) 613 ctrl->flash_dma_offsets = flash_dma_regs_v0; 614 else 615 ctrl->flash_dma_offsets = flash_dma_regs_v1; 616 } 617 618 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl, 619 enum brcmnand_reg reg) 620 { 621 u16 offs = ctrl->reg_offsets[reg]; 622 623 if (offs) 624 return nand_readreg(ctrl, offs); 625 else 626 return 0; 627 } 628 629 static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl, 630 enum brcmnand_reg reg, u32 val) 631 { 632 u16 offs = ctrl->reg_offsets[reg]; 633 634 if (offs) 635 nand_writereg(ctrl, offs, val); 636 } 637 638 static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl, 639 enum brcmnand_reg reg, u32 mask, unsigned 640 int shift, u32 val) 641 { 642 u32 tmp = brcmnand_read_reg(ctrl, reg); 643 644 tmp &= ~mask; 645 tmp |= val << shift; 646 brcmnand_write_reg(ctrl, reg, tmp); 647 } 648 649 static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word) 650 { 651 return __raw_readl(ctrl->nand_fc + word * 4); 652 } 653 654 static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl, 655 int word, u32 val) 656 { 657 __raw_writel(val, ctrl->nand_fc + word * 4); 658 } 659 660 static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl) 661 { 662 663 /* Clear error addresses */ 664 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0); 665 brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0); 666 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0); 667 brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0); 668 } 669 670 static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl) 671 { 672 u64 err_addr; 673 674 err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR); 675 err_addr |= ((u64)(brcmnand_read_reg(ctrl, 676 BRCMNAND_UNCORR_EXT_ADDR) 677 & 0xffff) << 32); 678 679 return err_addr; 680 } 681 682 static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl) 683 { 684 u64 err_addr; 685 686 err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR); 687 err_addr |= ((u64)(brcmnand_read_reg(ctrl, 688 BRCMNAND_CORR_EXT_ADDR) 689 & 0xffff) << 32); 690 691 return err_addr; 692 } 693 694 static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr) 695 { 696 struct nand_chip *chip = mtd_to_nand(mtd); 697 struct brcmnand_host *host = nand_get_controller_data(chip); 698 struct brcmnand_controller *ctrl = host->ctrl; 699 700 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, 701 (host->cs << 16) | ((addr >> 32) & 0xffff)); 702 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); 703 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, 704 lower_32_bits(addr)); 705 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); 706 } 707 708 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs, 709 enum brcmnand_cs_reg reg) 710 { 711 u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE]; 712 u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE]; 713 u8 cs_offs; 714 715 if (cs == 0 && ctrl->cs0_offsets) 716 cs_offs = ctrl->cs0_offsets[reg]; 717 else 718 cs_offs = ctrl->cs_offsets[reg]; 719 720 if (cs && offs_cs1) 721 return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs; 722 723 return offs_cs0 + cs * ctrl->reg_spacing + cs_offs; 724 } 725 726 static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl) 727 { 728 if (ctrl->nand_version < 0x0600) 729 return 1; 730 return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT); 731 } 732 733 static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) 734 { 735 struct brcmnand_controller *ctrl = host->ctrl; 736 unsigned int shift = 0, bits; 737 enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; 738 int cs = host->cs; 739 740 if (ctrl->nand_version == 0x0702) 741 bits = 7; 742 else if (ctrl->nand_version >= 0x0600) 743 bits = 6; 744 else if (ctrl->nand_version >= 0x0500) 745 bits = 5; 746 else 747 bits = 4; 748 749 if (ctrl->nand_version >= 0x0702) { 750 if (cs >= 4) 751 reg = BRCMNAND_CORR_THRESHOLD_EXT; 752 shift = (cs % 4) * bits; 753 } else if (ctrl->nand_version >= 0x0600) { 754 if (cs >= 5) 755 reg = BRCMNAND_CORR_THRESHOLD_EXT; 756 shift = (cs % 5) * bits; 757 } 758 brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val); 759 } 760 761 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) 762 { 763 if (ctrl->nand_version < 0x0602) 764 return 24; 765 return 0; 766 } 767 768 /*********************************************************************** 769 * NAND ACC CONTROL bitfield 770 * 771 * Some bits have remained constant throughout hardware revision, while 772 * others have shifted around. 773 ***********************************************************************/ 774 775 /* Constant for all versions (where supported) */ 776 enum { 777 /* See BRCMNAND_HAS_CACHE_MODE */ 778 ACC_CONTROL_CACHE_MODE = BIT(22), 779 780 /* See BRCMNAND_HAS_PREFETCH */ 781 ACC_CONTROL_PREFETCH = BIT(23), 782 783 ACC_CONTROL_PAGE_HIT = BIT(24), 784 ACC_CONTROL_WR_PREEMPT = BIT(25), 785 ACC_CONTROL_PARTIAL_PAGE = BIT(26), 786 ACC_CONTROL_RD_ERASED = BIT(27), 787 ACC_CONTROL_FAST_PGM_RDIN = BIT(28), 788 ACC_CONTROL_WR_ECC = BIT(30), 789 ACC_CONTROL_RD_ECC = BIT(31), 790 }; 791 792 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) 793 { 794 if (ctrl->nand_version == 0x0702) 795 return GENMASK(7, 0); 796 else if (ctrl->nand_version >= 0x0600) 797 return GENMASK(6, 0); 798 else 799 return GENMASK(5, 0); 800 } 801 802 #define NAND_ACC_CONTROL_ECC_SHIFT 16 803 #define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 804 805 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) 806 { 807 u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; 808 809 mask <<= NAND_ACC_CONTROL_ECC_SHIFT; 810 811 /* v7.2 includes additional ECC levels */ 812 if (ctrl->nand_version >= 0x0702) 813 mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; 814 815 return mask; 816 } 817 818 static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) 819 { 820 struct brcmnand_controller *ctrl = host->ctrl; 821 u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); 822 u32 acc_control = nand_readreg(ctrl, offs); 823 u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC; 824 825 if (en) { 826 acc_control |= ecc_flags; /* enable RD/WR ECC */ 827 acc_control |= host->hwcfg.ecc_level 828 << NAND_ACC_CONTROL_ECC_SHIFT; 829 } else { 830 acc_control &= ~ecc_flags; /* disable RD/WR ECC */ 831 acc_control &= ~brcmnand_ecc_level_mask(ctrl); 832 } 833 834 nand_writereg(ctrl, offs, acc_control); 835 } 836 837 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) 838 { 839 if (ctrl->nand_version >= 0x0702) 840 return 9; 841 else if (ctrl->nand_version >= 0x0600) 842 return 7; 843 else if (ctrl->nand_version >= 0x0500) 844 return 6; 845 else 846 return -1; 847 } 848 849 static int brcmnand_get_sector_size_1k(struct brcmnand_host *host) 850 { 851 struct brcmnand_controller *ctrl = host->ctrl; 852 int shift = brcmnand_sector_1k_shift(ctrl); 853 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 854 BRCMNAND_CS_ACC_CONTROL); 855 856 if (shift < 0) 857 return 0; 858 859 return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1; 860 } 861 862 static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val) 863 { 864 struct brcmnand_controller *ctrl = host->ctrl; 865 int shift = brcmnand_sector_1k_shift(ctrl); 866 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 867 BRCMNAND_CS_ACC_CONTROL); 868 u32 tmp; 869 870 if (shift < 0) 871 return; 872 873 tmp = nand_readreg(ctrl, acc_control_offs); 874 tmp &= ~(1 << shift); 875 tmp |= (!!val) << shift; 876 nand_writereg(ctrl, acc_control_offs, tmp); 877 } 878 879 /*********************************************************************** 880 * CS_NAND_SELECT 881 ***********************************************************************/ 882 883 enum { 884 CS_SELECT_NAND_WP = BIT(29), 885 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), 886 }; 887 888 static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, 889 u32 mask, u32 expected_val, 890 unsigned long timeout_ms) 891 { 892 unsigned long limit; 893 u32 val; 894 895 if (!timeout_ms) 896 timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS; 897 898 limit = jiffies + msecs_to_jiffies(timeout_ms); 899 do { 900 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); 901 if ((val & mask) == expected_val) 902 return 0; 903 904 cpu_relax(); 905 } while (time_after(limit, jiffies)); 906 907 dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", 908 expected_val, val & mask); 909 910 return -ETIMEDOUT; 911 } 912 913 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) 914 { 915 u32 val = en ? CS_SELECT_NAND_WP : 0; 916 917 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val); 918 } 919 920 /*********************************************************************** 921 * Flash DMA 922 ***********************************************************************/ 923 924 static inline bool has_flash_dma(struct brcmnand_controller *ctrl) 925 { 926 return ctrl->flash_dma_base; 927 } 928 929 static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl) 930 { 931 if (ctrl->pio_poll_mode) 932 return; 933 934 if (has_flash_dma(ctrl)) { 935 ctrl->flash_dma_base = NULL; 936 disable_irq(ctrl->dma_irq); 937 } 938 939 disable_irq(ctrl->irq); 940 ctrl->pio_poll_mode = true; 941 } 942 943 static inline bool flash_dma_buf_ok(const void *buf) 944 { 945 return buf && !is_vmalloc_addr(buf) && 946 likely(IS_ALIGNED((uintptr_t)buf, 4)); 947 } 948 949 static inline void flash_dma_writel(struct brcmnand_controller *ctrl, 950 enum flash_dma_reg dma_reg, u32 val) 951 { 952 u16 offs = ctrl->flash_dma_offsets[dma_reg]; 953 954 brcmnand_writel(val, ctrl->flash_dma_base + offs); 955 } 956 957 static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, 958 enum flash_dma_reg dma_reg) 959 { 960 u16 offs = ctrl->flash_dma_offsets[dma_reg]; 961 962 return brcmnand_readl(ctrl->flash_dma_base + offs); 963 } 964 965 /* Low-level operation types: command, address, write, or read */ 966 enum brcmnand_llop_type { 967 LL_OP_CMD, 968 LL_OP_ADDR, 969 LL_OP_WR, 970 LL_OP_RD, 971 }; 972 973 /*********************************************************************** 974 * Internal support functions 975 ***********************************************************************/ 976 977 static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl, 978 struct brcmnand_cfg *cfg) 979 { 980 if (ctrl->nand_version <= 0x0701) 981 return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && 982 cfg->ecc_level == 15; 983 else 984 return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 && 985 cfg->ecc_level == 15) || 986 (cfg->spare_area_size == 28 && cfg->ecc_level == 16)); 987 } 988 989 /* 990 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given 991 * the layout/configuration. 992 * Returns -ERRCODE on failure. 993 */ 994 static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section, 995 struct mtd_oob_region *oobregion) 996 { 997 struct nand_chip *chip = mtd_to_nand(mtd); 998 struct brcmnand_host *host = nand_get_controller_data(chip); 999 struct brcmnand_cfg *cfg = &host->hwcfg; 1000 int sas = cfg->spare_area_size << cfg->sector_size_1k; 1001 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 1002 1003 if (section >= sectors) 1004 return -ERANGE; 1005 1006 oobregion->offset = (section * sas) + 6; 1007 oobregion->length = 3; 1008 1009 return 0; 1010 } 1011 1012 static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, 1013 struct mtd_oob_region *oobregion) 1014 { 1015 struct nand_chip *chip = mtd_to_nand(mtd); 1016 struct brcmnand_host *host = nand_get_controller_data(chip); 1017 struct brcmnand_cfg *cfg = &host->hwcfg; 1018 int sas = cfg->spare_area_size << cfg->sector_size_1k; 1019 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 1020 1021 if (section >= sectors * 2) 1022 return -ERANGE; 1023 1024 oobregion->offset = (section / 2) * sas; 1025 1026 if (section & 1) { 1027 oobregion->offset += 9; 1028 oobregion->length = 7; 1029 } else { 1030 oobregion->length = 6; 1031 1032 /* First sector of each page may have BBI */ 1033 if (!section) { 1034 /* 1035 * Small-page NAND use byte 6 for BBI while large-page 1036 * NAND use byte 0. 1037 */ 1038 if (cfg->page_size > 512) 1039 oobregion->offset++; 1040 oobregion->length--; 1041 } 1042 } 1043 1044 return 0; 1045 } 1046 1047 static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = { 1048 .ecc = brcmnand_hamming_ooblayout_ecc, 1049 .free = brcmnand_hamming_ooblayout_free, 1050 }; 1051 1052 static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section, 1053 struct mtd_oob_region *oobregion) 1054 { 1055 struct nand_chip *chip = mtd_to_nand(mtd); 1056 struct brcmnand_host *host = nand_get_controller_data(chip); 1057 struct brcmnand_cfg *cfg = &host->hwcfg; 1058 int sas = cfg->spare_area_size << cfg->sector_size_1k; 1059 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 1060 1061 if (section >= sectors) 1062 return -ERANGE; 1063 1064 oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes; 1065 oobregion->length = chip->ecc.bytes; 1066 1067 return 0; 1068 } 1069 1070 static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section, 1071 struct mtd_oob_region *oobregion) 1072 { 1073 struct nand_chip *chip = mtd_to_nand(mtd); 1074 struct brcmnand_host *host = nand_get_controller_data(chip); 1075 struct brcmnand_cfg *cfg = &host->hwcfg; 1076 int sas = cfg->spare_area_size << cfg->sector_size_1k; 1077 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 1078 1079 if (section >= sectors) 1080 return -ERANGE; 1081 1082 if (sas <= chip->ecc.bytes) 1083 return 0; 1084 1085 oobregion->offset = section * sas; 1086 oobregion->length = sas - chip->ecc.bytes; 1087 1088 if (!section) { 1089 oobregion->offset++; 1090 oobregion->length--; 1091 } 1092 1093 return 0; 1094 } 1095 1096 static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section, 1097 struct mtd_oob_region *oobregion) 1098 { 1099 struct nand_chip *chip = mtd_to_nand(mtd); 1100 struct brcmnand_host *host = nand_get_controller_data(chip); 1101 struct brcmnand_cfg *cfg = &host->hwcfg; 1102 int sas = cfg->spare_area_size << cfg->sector_size_1k; 1103 1104 if (section > 1 || sas - chip->ecc.bytes < 6 || 1105 (section && sas - chip->ecc.bytes == 6)) 1106 return -ERANGE; 1107 1108 if (!section) { 1109 oobregion->offset = 0; 1110 oobregion->length = 5; 1111 } else { 1112 oobregion->offset = 6; 1113 oobregion->length = sas - chip->ecc.bytes - 6; 1114 } 1115 1116 return 0; 1117 } 1118 1119 static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = { 1120 .ecc = brcmnand_bch_ooblayout_ecc, 1121 .free = brcmnand_bch_ooblayout_free_lp, 1122 }; 1123 1124 static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = { 1125 .ecc = brcmnand_bch_ooblayout_ecc, 1126 .free = brcmnand_bch_ooblayout_free_sp, 1127 }; 1128 1129 static int brcmstb_choose_ecc_layout(struct brcmnand_host *host) 1130 { 1131 struct brcmnand_cfg *p = &host->hwcfg; 1132 struct mtd_info *mtd = nand_to_mtd(&host->chip); 1133 struct nand_ecc_ctrl *ecc = &host->chip.ecc; 1134 unsigned int ecc_level = p->ecc_level; 1135 int sas = p->spare_area_size << p->sector_size_1k; 1136 int sectors = p->page_size / (512 << p->sector_size_1k); 1137 1138 if (p->sector_size_1k) 1139 ecc_level <<= 1; 1140 1141 if (is_hamming_ecc(host->ctrl, p)) { 1142 ecc->bytes = 3 * sectors; 1143 mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); 1144 return 0; 1145 } 1146 1147 /* 1148 * CONTROLLER_VERSION: 1149 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8) 1150 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8) 1151 * But we will just be conservative. 1152 */ 1153 ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8); 1154 if (p->page_size == 512) 1155 mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops); 1156 else 1157 mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops); 1158 1159 if (ecc->bytes >= sas) { 1160 dev_err(&host->pdev->dev, 1161 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n", 1162 ecc->bytes, sas); 1163 return -EINVAL; 1164 } 1165 1166 return 0; 1167 } 1168 1169 static void brcmnand_wp(struct mtd_info *mtd, int wp) 1170 { 1171 struct nand_chip *chip = mtd_to_nand(mtd); 1172 struct brcmnand_host *host = nand_get_controller_data(chip); 1173 struct brcmnand_controller *ctrl = host->ctrl; 1174 1175 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { 1176 static int old_wp = -1; 1177 int ret; 1178 1179 if (old_wp != wp) { 1180 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); 1181 old_wp = wp; 1182 } 1183 1184 /* 1185 * make sure ctrl/flash ready before and after 1186 * changing state of #WP pin 1187 */ 1188 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | 1189 NAND_STATUS_READY, 1190 NAND_CTRL_RDY | 1191 NAND_STATUS_READY, 0); 1192 if (ret) 1193 return; 1194 1195 brcmnand_set_wp(ctrl, wp); 1196 nand_status_op(chip, NULL); 1197 /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ 1198 ret = bcmnand_ctrl_poll_status(ctrl, 1199 NAND_CTRL_RDY | 1200 NAND_STATUS_READY | 1201 NAND_STATUS_WP, 1202 NAND_CTRL_RDY | 1203 NAND_STATUS_READY | 1204 (wp ? 0 : NAND_STATUS_WP), 0); 1205 1206 if (ret) 1207 dev_err_ratelimited(&host->pdev->dev, 1208 "nand #WP expected %s\n", 1209 wp ? "on" : "off"); 1210 } 1211 } 1212 1213 /* Helper functions for reading and writing OOB registers */ 1214 static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs) 1215 { 1216 u16 offset0, offset10, reg_offs; 1217 1218 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE]; 1219 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE]; 1220 1221 if (offs >= ctrl->max_oob) 1222 return 0x77; 1223 1224 if (offs >= 16 && offset10) 1225 reg_offs = offset10 + ((offs - 0x10) & ~0x03); 1226 else 1227 reg_offs = offset0 + (offs & ~0x03); 1228 1229 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3)); 1230 } 1231 1232 static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs, 1233 u32 data) 1234 { 1235 u16 offset0, offset10, reg_offs; 1236 1237 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE]; 1238 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE]; 1239 1240 if (offs >= ctrl->max_oob) 1241 return; 1242 1243 if (offs >= 16 && offset10) 1244 reg_offs = offset10 + ((offs - 0x10) & ~0x03); 1245 else 1246 reg_offs = offset0 + (offs & ~0x03); 1247 1248 nand_writereg(ctrl, reg_offs, data); 1249 } 1250 1251 /* 1252 * read_oob_from_regs - read data from OOB registers 1253 * @ctrl: NAND controller 1254 * @i: sub-page sector index 1255 * @oob: buffer to read to 1256 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE) 1257 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal 1258 */ 1259 static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob, 1260 int sas, int sector_1k) 1261 { 1262 int tbytes = sas << sector_1k; 1263 int j; 1264 1265 /* Adjust OOB values for 1K sector size */ 1266 if (sector_1k && (i & 0x01)) 1267 tbytes = max(0, tbytes - (int)ctrl->max_oob); 1268 tbytes = min_t(int, tbytes, ctrl->max_oob); 1269 1270 for (j = 0; j < tbytes; j++) 1271 oob[j] = oob_reg_read(ctrl, j); 1272 return tbytes; 1273 } 1274 1275 /* 1276 * write_oob_to_regs - write data to OOB registers 1277 * @i: sub-page sector index 1278 * @oob: buffer to write from 1279 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE) 1280 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal 1281 */ 1282 static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, 1283 const u8 *oob, int sas, int sector_1k) 1284 { 1285 int tbytes = sas << sector_1k; 1286 int j; 1287 1288 /* Adjust OOB values for 1K sector size */ 1289 if (sector_1k && (i & 0x01)) 1290 tbytes = max(0, tbytes - (int)ctrl->max_oob); 1291 tbytes = min_t(int, tbytes, ctrl->max_oob); 1292 1293 for (j = 0; j < tbytes; j += 4) 1294 oob_reg_write(ctrl, j, 1295 (oob[j + 0] << 24) | 1296 (oob[j + 1] << 16) | 1297 (oob[j + 2] << 8) | 1298 (oob[j + 3] << 0)); 1299 return tbytes; 1300 } 1301 1302 static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data) 1303 { 1304 struct brcmnand_controller *ctrl = data; 1305 1306 /* Discard all NAND_CTLRDY interrupts during DMA */ 1307 if (ctrl->dma_pending) 1308 return IRQ_HANDLED; 1309 1310 complete(&ctrl->done); 1311 return IRQ_HANDLED; 1312 } 1313 1314 /* Handle SoC-specific interrupt hardware */ 1315 static irqreturn_t brcmnand_irq(int irq, void *data) 1316 { 1317 struct brcmnand_controller *ctrl = data; 1318 1319 if (ctrl->soc->ctlrdy_ack(ctrl->soc)) 1320 return brcmnand_ctlrdy_irq(irq, data); 1321 1322 return IRQ_NONE; 1323 } 1324 1325 static irqreturn_t brcmnand_dma_irq(int irq, void *data) 1326 { 1327 struct brcmnand_controller *ctrl = data; 1328 1329 complete(&ctrl->dma_done); 1330 1331 return IRQ_HANDLED; 1332 } 1333 1334 static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) 1335 { 1336 struct brcmnand_controller *ctrl = host->ctrl; 1337 int ret; 1338 u64 cmd_addr; 1339 1340 cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); 1341 1342 dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr); 1343 1344 BUG_ON(ctrl->cmd_pending != 0); 1345 ctrl->cmd_pending = cmd; 1346 1347 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); 1348 WARN_ON(ret); 1349 1350 mb(); /* flush previous writes */ 1351 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, 1352 cmd << brcmnand_cmd_shift(ctrl)); 1353 } 1354 1355 /*********************************************************************** 1356 * NAND MTD API: read/program/erase 1357 ***********************************************************************/ 1358 1359 static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat, 1360 unsigned int ctrl) 1361 { 1362 /* intentionally left blank */ 1363 } 1364 1365 static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip) 1366 { 1367 struct brcmnand_host *host = nand_get_controller_data(chip); 1368 struct brcmnand_controller *ctrl = host->ctrl; 1369 struct mtd_info *mtd = nand_to_mtd(chip); 1370 bool err = false; 1371 int sts; 1372 1373 if (mtd->oops_panic_write) { 1374 /* switch to interrupt polling and PIO mode */ 1375 disable_ctrl_irqs(ctrl); 1376 sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, 1377 NAND_CTRL_RDY, 0); 1378 err = (sts < 0) ? true : false; 1379 } else { 1380 unsigned long timeo = msecs_to_jiffies( 1381 NAND_POLL_STATUS_TIMEOUT_MS); 1382 /* wait for completion interrupt */ 1383 sts = wait_for_completion_timeout(&ctrl->done, timeo); 1384 err = (sts <= 0) ? true : false; 1385 } 1386 1387 return err; 1388 } 1389 1390 static int brcmnand_waitfunc(struct nand_chip *chip) 1391 { 1392 struct brcmnand_host *host = nand_get_controller_data(chip); 1393 struct brcmnand_controller *ctrl = host->ctrl; 1394 bool err = false; 1395 1396 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending); 1397 if (ctrl->cmd_pending) 1398 err = brcmstb_nand_wait_for_completion(chip); 1399 1400 if (err) { 1401 u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START) 1402 >> brcmnand_cmd_shift(ctrl); 1403 1404 dev_err_ratelimited(ctrl->dev, 1405 "timeout waiting for command %#02x\n", cmd); 1406 dev_err_ratelimited(ctrl->dev, "intfc status %08x\n", 1407 brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS)); 1408 } 1409 ctrl->cmd_pending = 0; 1410 return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & 1411 INTFC_FLASH_STATUS; 1412 } 1413 1414 enum { 1415 LLOP_RE = BIT(16), 1416 LLOP_WE = BIT(17), 1417 LLOP_ALE = BIT(18), 1418 LLOP_CLE = BIT(19), 1419 LLOP_RETURN_IDLE = BIT(31), 1420 1421 LLOP_DATA_MASK = GENMASK(15, 0), 1422 }; 1423 1424 static int brcmnand_low_level_op(struct brcmnand_host *host, 1425 enum brcmnand_llop_type type, u32 data, 1426 bool last_op) 1427 { 1428 struct nand_chip *chip = &host->chip; 1429 struct brcmnand_controller *ctrl = host->ctrl; 1430 u32 tmp; 1431 1432 tmp = data & LLOP_DATA_MASK; 1433 switch (type) { 1434 case LL_OP_CMD: 1435 tmp |= LLOP_WE | LLOP_CLE; 1436 break; 1437 case LL_OP_ADDR: 1438 /* WE | ALE */ 1439 tmp |= LLOP_WE | LLOP_ALE; 1440 break; 1441 case LL_OP_WR: 1442 /* WE */ 1443 tmp |= LLOP_WE; 1444 break; 1445 case LL_OP_RD: 1446 /* RE */ 1447 tmp |= LLOP_RE; 1448 break; 1449 } 1450 if (last_op) 1451 /* RETURN_IDLE */ 1452 tmp |= LLOP_RETURN_IDLE; 1453 1454 dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp); 1455 1456 brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp); 1457 (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP); 1458 1459 brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP); 1460 return brcmnand_waitfunc(chip); 1461 } 1462 1463 static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command, 1464 int column, int page_addr) 1465 { 1466 struct mtd_info *mtd = nand_to_mtd(chip); 1467 struct brcmnand_host *host = nand_get_controller_data(chip); 1468 struct brcmnand_controller *ctrl = host->ctrl; 1469 u64 addr = (u64)page_addr << chip->page_shift; 1470 int native_cmd = 0; 1471 1472 if (command == NAND_CMD_READID || command == NAND_CMD_PARAM || 1473 command == NAND_CMD_RNDOUT) 1474 addr = (u64)column; 1475 /* Avoid propagating a negative, don't-care address */ 1476 else if (page_addr < 0) 1477 addr = 0; 1478 1479 dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command, 1480 (unsigned long long)addr); 1481 1482 host->last_cmd = command; 1483 host->last_byte = 0; 1484 host->last_addr = addr; 1485 1486 switch (command) { 1487 case NAND_CMD_RESET: 1488 native_cmd = CMD_FLASH_RESET; 1489 break; 1490 case NAND_CMD_STATUS: 1491 native_cmd = CMD_STATUS_READ; 1492 break; 1493 case NAND_CMD_READID: 1494 native_cmd = CMD_DEVICE_ID_READ; 1495 break; 1496 case NAND_CMD_READOOB: 1497 native_cmd = CMD_SPARE_AREA_READ; 1498 break; 1499 case NAND_CMD_ERASE1: 1500 native_cmd = CMD_BLOCK_ERASE; 1501 brcmnand_wp(mtd, 0); 1502 break; 1503 case NAND_CMD_PARAM: 1504 native_cmd = CMD_PARAMETER_READ; 1505 break; 1506 case NAND_CMD_SET_FEATURES: 1507 case NAND_CMD_GET_FEATURES: 1508 brcmnand_low_level_op(host, LL_OP_CMD, command, false); 1509 brcmnand_low_level_op(host, LL_OP_ADDR, column, false); 1510 break; 1511 case NAND_CMD_RNDOUT: 1512 native_cmd = CMD_PARAMETER_CHANGE_COL; 1513 addr &= ~((u64)(FC_BYTES - 1)); 1514 /* 1515 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0 1516 * NB: hwcfg.sector_size_1k may not be initialized yet 1517 */ 1518 if (brcmnand_get_sector_size_1k(host)) { 1519 host->hwcfg.sector_size_1k = 1520 brcmnand_get_sector_size_1k(host); 1521 brcmnand_set_sector_size_1k(host, 0); 1522 } 1523 break; 1524 } 1525 1526 if (!native_cmd) 1527 return; 1528 1529 brcmnand_set_cmd_addr(mtd, addr); 1530 brcmnand_send_cmd(host, native_cmd); 1531 brcmnand_waitfunc(chip); 1532 1533 if (native_cmd == CMD_PARAMETER_READ || 1534 native_cmd == CMD_PARAMETER_CHANGE_COL) { 1535 /* Copy flash cache word-wise */ 1536 u32 *flash_cache = (u32 *)ctrl->flash_cache; 1537 int i; 1538 1539 brcmnand_soc_data_bus_prepare(ctrl->soc, true); 1540 1541 /* 1542 * Must cache the FLASH_CACHE now, since changes in 1543 * SECTOR_SIZE_1K may invalidate it 1544 */ 1545 for (i = 0; i < FC_WORDS; i++) 1546 /* 1547 * Flash cache is big endian for parameter pages, at 1548 * least on STB SoCs 1549 */ 1550 flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i)); 1551 1552 brcmnand_soc_data_bus_unprepare(ctrl->soc, true); 1553 1554 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */ 1555 if (host->hwcfg.sector_size_1k) 1556 brcmnand_set_sector_size_1k(host, 1557 host->hwcfg.sector_size_1k); 1558 } 1559 1560 /* Re-enable protection is necessary only after erase */ 1561 if (command == NAND_CMD_ERASE1) 1562 brcmnand_wp(mtd, 1); 1563 } 1564 1565 static uint8_t brcmnand_read_byte(struct nand_chip *chip) 1566 { 1567 struct brcmnand_host *host = nand_get_controller_data(chip); 1568 struct brcmnand_controller *ctrl = host->ctrl; 1569 uint8_t ret = 0; 1570 int addr, offs; 1571 1572 switch (host->last_cmd) { 1573 case NAND_CMD_READID: 1574 if (host->last_byte < 4) 1575 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >> 1576 (24 - (host->last_byte << 3)); 1577 else if (host->last_byte < 8) 1578 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >> 1579 (56 - (host->last_byte << 3)); 1580 break; 1581 1582 case NAND_CMD_READOOB: 1583 ret = oob_reg_read(ctrl, host->last_byte); 1584 break; 1585 1586 case NAND_CMD_STATUS: 1587 ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & 1588 INTFC_FLASH_STATUS; 1589 if (wp_on) /* hide WP status */ 1590 ret |= NAND_STATUS_WP; 1591 break; 1592 1593 case NAND_CMD_PARAM: 1594 case NAND_CMD_RNDOUT: 1595 addr = host->last_addr + host->last_byte; 1596 offs = addr & (FC_BYTES - 1); 1597 1598 /* At FC_BYTES boundary, switch to next column */ 1599 if (host->last_byte > 0 && offs == 0) 1600 nand_change_read_column_op(chip, addr, NULL, 0, false); 1601 1602 ret = ctrl->flash_cache[offs]; 1603 break; 1604 case NAND_CMD_GET_FEATURES: 1605 if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) { 1606 ret = 0; 1607 } else { 1608 bool last = host->last_byte == 1609 ONFI_SUBFEATURE_PARAM_LEN - 1; 1610 brcmnand_low_level_op(host, LL_OP_RD, 0, last); 1611 ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff; 1612 } 1613 } 1614 1615 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret); 1616 host->last_byte++; 1617 1618 return ret; 1619 } 1620 1621 static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len) 1622 { 1623 int i; 1624 1625 for (i = 0; i < len; i++, buf++) 1626 *buf = brcmnand_read_byte(chip); 1627 } 1628 1629 static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf, 1630 int len) 1631 { 1632 int i; 1633 struct brcmnand_host *host = nand_get_controller_data(chip); 1634 1635 switch (host->last_cmd) { 1636 case NAND_CMD_SET_FEATURES: 1637 for (i = 0; i < len; i++) 1638 brcmnand_low_level_op(host, LL_OP_WR, buf[i], 1639 (i + 1) == len); 1640 break; 1641 default: 1642 BUG(); 1643 break; 1644 } 1645 } 1646 1647 /** 1648 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the 1649 * following ahead of time: 1650 * - Is this descriptor the beginning or end of a linked list? 1651 * - What is the (DMA) address of the next descriptor in the linked list? 1652 */ 1653 static int brcmnand_fill_dma_desc(struct brcmnand_host *host, 1654 struct brcm_nand_dma_desc *desc, u64 addr, 1655 dma_addr_t buf, u32 len, u8 dma_cmd, 1656 bool begin, bool end, 1657 dma_addr_t next_desc) 1658 { 1659 memset(desc, 0, sizeof(*desc)); 1660 /* Descriptors are written in native byte order (wordwise) */ 1661 desc->next_desc = lower_32_bits(next_desc); 1662 desc->next_desc_ext = upper_32_bits(next_desc); 1663 desc->cmd_irq = (dma_cmd << 24) | 1664 (end ? (0x03 << 8) : 0) | /* IRQ | STOP */ 1665 (!!begin) | ((!!end) << 1); /* head, tail */ 1666 #ifdef CONFIG_CPU_BIG_ENDIAN 1667 desc->cmd_irq |= 0x01 << 12; 1668 #endif 1669 desc->dram_addr = lower_32_bits(buf); 1670 desc->dram_addr_ext = upper_32_bits(buf); 1671 desc->tfr_len = len; 1672 desc->total_len = len; 1673 desc->flash_addr = lower_32_bits(addr); 1674 desc->flash_addr_ext = upper_32_bits(addr); 1675 desc->cs = host->cs; 1676 desc->status_valid = 0x01; 1677 return 0; 1678 } 1679 1680 /** 1681 * Kick the FLASH_DMA engine, with a given DMA descriptor 1682 */ 1683 static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc) 1684 { 1685 struct brcmnand_controller *ctrl = host->ctrl; 1686 unsigned long timeo = msecs_to_jiffies(100); 1687 1688 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc)); 1689 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC); 1690 if (ctrl->nand_version > 0x0602) { 1691 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, 1692 upper_32_bits(desc)); 1693 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT); 1694 } 1695 1696 /* Start FLASH_DMA engine */ 1697 ctrl->dma_pending = true; 1698 mb(); /* flush previous writes */ 1699 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */ 1700 1701 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) { 1702 dev_err(ctrl->dev, 1703 "timeout waiting for DMA; status %#x, error status %#x\n", 1704 flash_dma_readl(ctrl, FLASH_DMA_STATUS), 1705 flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS)); 1706 } 1707 ctrl->dma_pending = false; 1708 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */ 1709 } 1710 1711 static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf, 1712 u32 len, u8 dma_cmd) 1713 { 1714 struct brcmnand_controller *ctrl = host->ctrl; 1715 dma_addr_t buf_pa; 1716 int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1717 1718 buf_pa = dma_map_single(ctrl->dev, buf, len, dir); 1719 if (dma_mapping_error(ctrl->dev, buf_pa)) { 1720 dev_err(ctrl->dev, "unable to map buffer for DMA\n"); 1721 return -ENOMEM; 1722 } 1723 1724 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len, 1725 dma_cmd, true, true, 0); 1726 1727 brcmnand_dma_run(host, ctrl->dma_pa); 1728 1729 dma_unmap_single(ctrl->dev, buf_pa, len, dir); 1730 1731 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR) 1732 return -EBADMSG; 1733 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR) 1734 return -EUCLEAN; 1735 1736 return 0; 1737 } 1738 1739 /* 1740 * Assumes proper CS is already set 1741 */ 1742 static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, 1743 u64 addr, unsigned int trans, u32 *buf, 1744 u8 *oob, u64 *err_addr) 1745 { 1746 struct brcmnand_host *host = nand_get_controller_data(chip); 1747 struct brcmnand_controller *ctrl = host->ctrl; 1748 int i, j, ret = 0; 1749 1750 brcmnand_clear_ecc_addr(ctrl); 1751 1752 for (i = 0; i < trans; i++, addr += FC_BYTES) { 1753 brcmnand_set_cmd_addr(mtd, addr); 1754 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */ 1755 brcmnand_send_cmd(host, CMD_PAGE_READ); 1756 brcmnand_waitfunc(chip); 1757 1758 if (likely(buf)) { 1759 brcmnand_soc_data_bus_prepare(ctrl->soc, false); 1760 1761 for (j = 0; j < FC_WORDS; j++, buf++) 1762 *buf = brcmnand_read_fc(ctrl, j); 1763 1764 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); 1765 } 1766 1767 if (oob) 1768 oob += read_oob_from_regs(ctrl, i, oob, 1769 mtd->oobsize / trans, 1770 host->hwcfg.sector_size_1k); 1771 1772 if (!ret) { 1773 *err_addr = brcmnand_get_uncorrecc_addr(ctrl); 1774 1775 if (*err_addr) 1776 ret = -EBADMSG; 1777 } 1778 1779 if (!ret) { 1780 *err_addr = brcmnand_get_correcc_addr(ctrl); 1781 1782 if (*err_addr) 1783 ret = -EUCLEAN; 1784 } 1785 } 1786 1787 return ret; 1788 } 1789 1790 /* 1791 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC 1792 * error 1793 * 1794 * Because the HW ECC signals an ECC error if an erase paged has even a single 1795 * bitflip, we must check each ECC error to see if it is actually an erased 1796 * page with bitflips, not a truly corrupted page. 1797 * 1798 * On a real error, return a negative error code (-EBADMSG for ECC error), and 1799 * buf will contain raw data. 1800 * Otherwise, buf gets filled with 0xffs and return the maximum number of 1801 * bitflips-per-ECC-sector to the caller. 1802 * 1803 */ 1804 static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, 1805 struct nand_chip *chip, void *buf, u64 addr) 1806 { 1807 int i, sas; 1808 void *oob = chip->oob_poi; 1809 int bitflips = 0; 1810 int page = addr >> chip->page_shift; 1811 int ret; 1812 void *ecc_chunk; 1813 1814 if (!buf) 1815 buf = nand_get_data_buf(chip); 1816 1817 sas = mtd->oobsize / chip->ecc.steps; 1818 1819 /* read without ecc for verification */ 1820 ret = chip->ecc.read_page_raw(chip, buf, true, page); 1821 if (ret) 1822 return ret; 1823 1824 for (i = 0; i < chip->ecc.steps; i++, oob += sas) { 1825 ecc_chunk = buf + chip->ecc.size * i; 1826 ret = nand_check_erased_ecc_chunk(ecc_chunk, 1827 chip->ecc.size, 1828 oob, sas, NULL, 0, 1829 chip->ecc.strength); 1830 if (ret < 0) 1831 return ret; 1832 1833 bitflips = max(bitflips, ret); 1834 } 1835 1836 return bitflips; 1837 } 1838 1839 static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, 1840 u64 addr, unsigned int trans, u32 *buf, u8 *oob) 1841 { 1842 struct brcmnand_host *host = nand_get_controller_data(chip); 1843 struct brcmnand_controller *ctrl = host->ctrl; 1844 u64 err_addr = 0; 1845 int err; 1846 bool retry = true; 1847 1848 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); 1849 1850 try_dmaread: 1851 brcmnand_clear_ecc_addr(ctrl); 1852 1853 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { 1854 err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES, 1855 CMD_PAGE_READ); 1856 if (err) { 1857 if (mtd_is_bitflip_or_eccerr(err)) 1858 err_addr = addr; 1859 else 1860 return -EIO; 1861 } 1862 } else { 1863 if (oob) 1864 memset(oob, 0x99, mtd->oobsize); 1865 1866 err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf, 1867 oob, &err_addr); 1868 } 1869 1870 if (mtd_is_eccerr(err)) { 1871 /* 1872 * On controller version and 7.0, 7.1 , DMA read after a 1873 * prior PIO read that reported uncorrectable error, 1874 * the DMA engine captures this error following DMA read 1875 * cleared only on subsequent DMA read, so just retry once 1876 * to clear a possible false error reported for current DMA 1877 * read 1878 */ 1879 if ((ctrl->nand_version == 0x0700) || 1880 (ctrl->nand_version == 0x0701)) { 1881 if (retry) { 1882 retry = false; 1883 goto try_dmaread; 1884 } 1885 } 1886 1887 /* 1888 * Controller version 7.2 has hw encoder to detect erased page 1889 * bitflips, apply sw verification for older controllers only 1890 */ 1891 if (ctrl->nand_version < 0x0702) { 1892 err = brcmstb_nand_verify_erased_page(mtd, chip, buf, 1893 addr); 1894 /* erased page bitflips corrected */ 1895 if (err >= 0) 1896 return err; 1897 } 1898 1899 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", 1900 (unsigned long long)err_addr); 1901 mtd->ecc_stats.failed++; 1902 /* NAND layer expects zero on ECC errors */ 1903 return 0; 1904 } 1905 1906 if (mtd_is_bitflip(err)) { 1907 unsigned int corrected = brcmnand_count_corrected(ctrl); 1908 1909 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n", 1910 (unsigned long long)err_addr); 1911 mtd->ecc_stats.corrected += corrected; 1912 /* Always exceed the software-imposed threshold */ 1913 return max(mtd->bitflip_threshold, corrected); 1914 } 1915 1916 return 0; 1917 } 1918 1919 static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf, 1920 int oob_required, int page) 1921 { 1922 struct mtd_info *mtd = nand_to_mtd(chip); 1923 struct brcmnand_host *host = nand_get_controller_data(chip); 1924 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 1925 1926 nand_read_page_op(chip, page, 0, NULL, 0); 1927 1928 return brcmnand_read(mtd, chip, host->last_addr, 1929 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); 1930 } 1931 1932 static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf, 1933 int oob_required, int page) 1934 { 1935 struct brcmnand_host *host = nand_get_controller_data(chip); 1936 struct mtd_info *mtd = nand_to_mtd(chip); 1937 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 1938 int ret; 1939 1940 nand_read_page_op(chip, page, 0, NULL, 0); 1941 1942 brcmnand_set_ecc_enabled(host, 0); 1943 ret = brcmnand_read(mtd, chip, host->last_addr, 1944 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); 1945 brcmnand_set_ecc_enabled(host, 1); 1946 return ret; 1947 } 1948 1949 static int brcmnand_read_oob(struct nand_chip *chip, int page) 1950 { 1951 struct mtd_info *mtd = nand_to_mtd(chip); 1952 1953 return brcmnand_read(mtd, chip, (u64)page << chip->page_shift, 1954 mtd->writesize >> FC_SHIFT, 1955 NULL, (u8 *)chip->oob_poi); 1956 } 1957 1958 static int brcmnand_read_oob_raw(struct nand_chip *chip, int page) 1959 { 1960 struct mtd_info *mtd = nand_to_mtd(chip); 1961 struct brcmnand_host *host = nand_get_controller_data(chip); 1962 1963 brcmnand_set_ecc_enabled(host, 0); 1964 brcmnand_read(mtd, chip, (u64)page << chip->page_shift, 1965 mtd->writesize >> FC_SHIFT, 1966 NULL, (u8 *)chip->oob_poi); 1967 brcmnand_set_ecc_enabled(host, 1); 1968 return 0; 1969 } 1970 1971 static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip, 1972 u64 addr, const u32 *buf, u8 *oob) 1973 { 1974 struct brcmnand_host *host = nand_get_controller_data(chip); 1975 struct brcmnand_controller *ctrl = host->ctrl; 1976 unsigned int i, j, trans = mtd->writesize >> FC_SHIFT; 1977 int status, ret = 0; 1978 1979 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf); 1980 1981 if (unlikely((unsigned long)buf & 0x03)) { 1982 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf); 1983 buf = (u32 *)((unsigned long)buf & ~0x03); 1984 } 1985 1986 brcmnand_wp(mtd, 0); 1987 1988 for (i = 0; i < ctrl->max_oob; i += 4) 1989 oob_reg_write(ctrl, i, 0xffffffff); 1990 1991 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { 1992 if (brcmnand_dma_trans(host, addr, (u32 *)buf, 1993 mtd->writesize, CMD_PROGRAM_PAGE)) 1994 ret = -EIO; 1995 goto out; 1996 } 1997 1998 for (i = 0; i < trans; i++, addr += FC_BYTES) { 1999 /* full address MUST be set before populating FC */ 2000 brcmnand_set_cmd_addr(mtd, addr); 2001 2002 if (buf) { 2003 brcmnand_soc_data_bus_prepare(ctrl->soc, false); 2004 2005 for (j = 0; j < FC_WORDS; j++, buf++) 2006 brcmnand_write_fc(ctrl, j, *buf); 2007 2008 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); 2009 } else if (oob) { 2010 for (j = 0; j < FC_WORDS; j++) 2011 brcmnand_write_fc(ctrl, j, 0xffffffff); 2012 } 2013 2014 if (oob) { 2015 oob += write_oob_to_regs(ctrl, i, oob, 2016 mtd->oobsize / trans, 2017 host->hwcfg.sector_size_1k); 2018 } 2019 2020 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */ 2021 brcmnand_send_cmd(host, CMD_PROGRAM_PAGE); 2022 status = brcmnand_waitfunc(chip); 2023 2024 if (status & NAND_STATUS_FAIL) { 2025 dev_info(ctrl->dev, "program failed at %llx\n", 2026 (unsigned long long)addr); 2027 ret = -EIO; 2028 goto out; 2029 } 2030 } 2031 out: 2032 brcmnand_wp(mtd, 1); 2033 return ret; 2034 } 2035 2036 static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf, 2037 int oob_required, int page) 2038 { 2039 struct mtd_info *mtd = nand_to_mtd(chip); 2040 struct brcmnand_host *host = nand_get_controller_data(chip); 2041 void *oob = oob_required ? chip->oob_poi : NULL; 2042 2043 nand_prog_page_begin_op(chip, page, 0, NULL, 0); 2044 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2045 2046 return nand_prog_page_end_op(chip); 2047 } 2048 2049 static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, 2050 int oob_required, int page) 2051 { 2052 struct mtd_info *mtd = nand_to_mtd(chip); 2053 struct brcmnand_host *host = nand_get_controller_data(chip); 2054 void *oob = oob_required ? chip->oob_poi : NULL; 2055 2056 nand_prog_page_begin_op(chip, page, 0, NULL, 0); 2057 brcmnand_set_ecc_enabled(host, 0); 2058 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2059 brcmnand_set_ecc_enabled(host, 1); 2060 2061 return nand_prog_page_end_op(chip); 2062 } 2063 2064 static int brcmnand_write_oob(struct nand_chip *chip, int page) 2065 { 2066 return brcmnand_write(nand_to_mtd(chip), chip, 2067 (u64)page << chip->page_shift, NULL, 2068 chip->oob_poi); 2069 } 2070 2071 static int brcmnand_write_oob_raw(struct nand_chip *chip, int page) 2072 { 2073 struct mtd_info *mtd = nand_to_mtd(chip); 2074 struct brcmnand_host *host = nand_get_controller_data(chip); 2075 int ret; 2076 2077 brcmnand_set_ecc_enabled(host, 0); 2078 ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL, 2079 (u8 *)chip->oob_poi); 2080 brcmnand_set_ecc_enabled(host, 1); 2081 2082 return ret; 2083 } 2084 2085 /*********************************************************************** 2086 * Per-CS setup (1 NAND device) 2087 ***********************************************************************/ 2088 2089 static int brcmnand_set_cfg(struct brcmnand_host *host, 2090 struct brcmnand_cfg *cfg) 2091 { 2092 struct brcmnand_controller *ctrl = host->ctrl; 2093 struct nand_chip *chip = &host->chip; 2094 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2095 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, 2096 BRCMNAND_CS_CFG_EXT); 2097 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 2098 BRCMNAND_CS_ACC_CONTROL); 2099 u8 block_size = 0, page_size = 0, device_size = 0; 2100 u32 tmp; 2101 2102 if (ctrl->block_sizes) { 2103 int i, found; 2104 2105 for (i = 0, found = 0; ctrl->block_sizes[i]; i++) 2106 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) { 2107 block_size = i; 2108 found = 1; 2109 } 2110 if (!found) { 2111 dev_warn(ctrl->dev, "invalid block size %u\n", 2112 cfg->block_size); 2113 return -EINVAL; 2114 } 2115 } else { 2116 block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE); 2117 } 2118 2119 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size && 2120 cfg->block_size > ctrl->max_block_size)) { 2121 dev_warn(ctrl->dev, "invalid block size %u\n", 2122 cfg->block_size); 2123 block_size = 0; 2124 } 2125 2126 if (ctrl->page_sizes) { 2127 int i, found; 2128 2129 for (i = 0, found = 0; ctrl->page_sizes[i]; i++) 2130 if (ctrl->page_sizes[i] == cfg->page_size) { 2131 page_size = i; 2132 found = 1; 2133 } 2134 if (!found) { 2135 dev_warn(ctrl->dev, "invalid page size %u\n", 2136 cfg->page_size); 2137 return -EINVAL; 2138 } 2139 } else { 2140 page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE); 2141 } 2142 2143 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size && 2144 cfg->page_size > ctrl->max_page_size)) { 2145 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size); 2146 return -EINVAL; 2147 } 2148 2149 if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) { 2150 dev_warn(ctrl->dev, "invalid device size 0x%llx\n", 2151 (unsigned long long)cfg->device_size); 2152 return -EINVAL; 2153 } 2154 device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE); 2155 2156 tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) | 2157 (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) | 2158 (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) | 2159 (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) | 2160 (device_size << CFG_DEVICE_SIZE_SHIFT); 2161 if (cfg_offs == cfg_ext_offs) { 2162 tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) | 2163 (block_size << CFG_BLK_SIZE_SHIFT); 2164 nand_writereg(ctrl, cfg_offs, tmp); 2165 } else { 2166 nand_writereg(ctrl, cfg_offs, tmp); 2167 tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) | 2168 (block_size << CFG_EXT_BLK_SIZE_SHIFT); 2169 nand_writereg(ctrl, cfg_ext_offs, tmp); 2170 } 2171 2172 tmp = nand_readreg(ctrl, acc_control_offs); 2173 tmp &= ~brcmnand_ecc_level_mask(ctrl); 2174 tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; 2175 tmp &= ~brcmnand_spare_area_mask(ctrl); 2176 tmp |= cfg->spare_area_size; 2177 nand_writereg(ctrl, acc_control_offs, tmp); 2178 2179 brcmnand_set_sector_size_1k(host, cfg->sector_size_1k); 2180 2181 /* threshold = ceil(BCH-level * 0.75) */ 2182 brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4)); 2183 2184 return 0; 2185 } 2186 2187 static void brcmnand_print_cfg(struct brcmnand_host *host, 2188 char *buf, struct brcmnand_cfg *cfg) 2189 { 2190 buf += sprintf(buf, 2191 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", 2192 (unsigned long long)cfg->device_size >> 20, 2193 cfg->block_size >> 10, 2194 cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size, 2195 cfg->page_size >= 1024 ? "KiB" : "B", 2196 cfg->spare_area_size, cfg->device_width); 2197 2198 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ 2199 if (is_hamming_ecc(host->ctrl, cfg)) 2200 sprintf(buf, ", Hamming ECC"); 2201 else if (cfg->sector_size_1k) 2202 sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); 2203 else 2204 sprintf(buf, ", BCH-%u", cfg->ecc_level); 2205 } 2206 2207 /* 2208 * Minimum number of bytes to address a page. Calculated as: 2209 * roundup(log2(size / page-size) / 8) 2210 * 2211 * NB: the following does not "round up" for non-power-of-2 'size'; but this is 2212 * OK because many other things will break if 'size' is irregular... 2213 */ 2214 static inline int get_blk_adr_bytes(u64 size, u32 writesize) 2215 { 2216 return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3; 2217 } 2218 2219 static int brcmnand_setup_dev(struct brcmnand_host *host) 2220 { 2221 struct mtd_info *mtd = nand_to_mtd(&host->chip); 2222 struct nand_chip *chip = &host->chip; 2223 struct brcmnand_controller *ctrl = host->ctrl; 2224 struct brcmnand_cfg *cfg = &host->hwcfg; 2225 char msg[128]; 2226 u32 offs, tmp, oob_sector; 2227 int ret; 2228 2229 memset(cfg, 0, sizeof(*cfg)); 2230 2231 ret = of_property_read_u32(nand_get_flash_node(chip), 2232 "brcm,nand-oob-sector-size", 2233 &oob_sector); 2234 if (ret) { 2235 /* Use detected size */ 2236 cfg->spare_area_size = mtd->oobsize / 2237 (mtd->writesize >> FC_SHIFT); 2238 } else { 2239 cfg->spare_area_size = oob_sector; 2240 } 2241 if (cfg->spare_area_size > ctrl->max_oob) 2242 cfg->spare_area_size = ctrl->max_oob; 2243 /* 2244 * Set oobsize to be consistent with controller's spare_area_size, as 2245 * the rest is inaccessible. 2246 */ 2247 mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT); 2248 2249 cfg->device_size = mtd->size; 2250 cfg->block_size = mtd->erasesize; 2251 cfg->page_size = mtd->writesize; 2252 cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8; 2253 cfg->col_adr_bytes = 2; 2254 cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize); 2255 2256 if (chip->ecc.mode != NAND_ECC_HW) { 2257 dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n", 2258 chip->ecc.mode); 2259 return -EINVAL; 2260 } 2261 2262 if (chip->ecc.algo == NAND_ECC_UNKNOWN) { 2263 if (chip->ecc.strength == 1 && chip->ecc.size == 512) 2264 /* Default to Hamming for 1-bit ECC, if unspecified */ 2265 chip->ecc.algo = NAND_ECC_HAMMING; 2266 else 2267 /* Otherwise, BCH */ 2268 chip->ecc.algo = NAND_ECC_BCH; 2269 } 2270 2271 if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 || 2272 chip->ecc.size != 512)) { 2273 dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n", 2274 chip->ecc.strength, chip->ecc.size); 2275 return -EINVAL; 2276 } 2277 2278 if (chip->ecc.mode != NAND_ECC_NONE && 2279 (!chip->ecc.size || !chip->ecc.strength)) { 2280 if (chip->base.eccreq.step_size && chip->base.eccreq.strength) { 2281 /* use detected ECC parameters */ 2282 chip->ecc.size = chip->base.eccreq.step_size; 2283 chip->ecc.strength = chip->base.eccreq.strength; 2284 dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n", 2285 chip->ecc.size, chip->ecc.strength); 2286 } 2287 } 2288 2289 switch (chip->ecc.size) { 2290 case 512: 2291 if (chip->ecc.algo == NAND_ECC_HAMMING) 2292 cfg->ecc_level = 15; 2293 else 2294 cfg->ecc_level = chip->ecc.strength; 2295 cfg->sector_size_1k = 0; 2296 break; 2297 case 1024: 2298 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) { 2299 dev_err(ctrl->dev, "1KB sectors not supported\n"); 2300 return -EINVAL; 2301 } 2302 if (chip->ecc.strength & 0x1) { 2303 dev_err(ctrl->dev, 2304 "odd ECC not supported with 1KB sectors\n"); 2305 return -EINVAL; 2306 } 2307 2308 cfg->ecc_level = chip->ecc.strength >> 1; 2309 cfg->sector_size_1k = 1; 2310 break; 2311 default: 2312 dev_err(ctrl->dev, "unsupported ECC size: %d\n", 2313 chip->ecc.size); 2314 return -EINVAL; 2315 } 2316 2317 cfg->ful_adr_bytes = cfg->blk_adr_bytes; 2318 if (mtd->writesize > 512) 2319 cfg->ful_adr_bytes += cfg->col_adr_bytes; 2320 else 2321 cfg->ful_adr_bytes += 1; 2322 2323 ret = brcmnand_set_cfg(host, cfg); 2324 if (ret) 2325 return ret; 2326 2327 brcmnand_set_ecc_enabled(host, 1); 2328 2329 brcmnand_print_cfg(host, msg, cfg); 2330 dev_info(ctrl->dev, "detected %s\n", msg); 2331 2332 /* Configure ACC_CONTROL */ 2333 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); 2334 tmp = nand_readreg(ctrl, offs); 2335 tmp &= ~ACC_CONTROL_PARTIAL_PAGE; 2336 tmp &= ~ACC_CONTROL_RD_ERASED; 2337 2338 /* We need to turn on Read from erased paged protected by ECC */ 2339 if (ctrl->nand_version >= 0x0702) 2340 tmp |= ACC_CONTROL_RD_ERASED; 2341 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; 2342 if (ctrl->features & BRCMNAND_HAS_PREFETCH) 2343 tmp &= ~ACC_CONTROL_PREFETCH; 2344 2345 nand_writereg(ctrl, offs, tmp); 2346 2347 return 0; 2348 } 2349 2350 static int brcmnand_attach_chip(struct nand_chip *chip) 2351 { 2352 struct mtd_info *mtd = nand_to_mtd(chip); 2353 struct brcmnand_host *host = nand_get_controller_data(chip); 2354 int ret; 2355 2356 chip->options |= NAND_NO_SUBPAGE_WRITE; 2357 /* 2358 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA 2359 * to/from, and have nand_base pass us a bounce buffer instead, as 2360 * needed. 2361 */ 2362 chip->options |= NAND_USE_BOUNCE_BUFFER; 2363 2364 if (chip->bbt_options & NAND_BBT_USE_FLASH) 2365 chip->bbt_options |= NAND_BBT_NO_OOB; 2366 2367 if (brcmnand_setup_dev(host)) 2368 return -ENXIO; 2369 2370 chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512; 2371 2372 /* only use our internal HW threshold */ 2373 mtd->bitflip_threshold = 1; 2374 2375 ret = brcmstb_choose_ecc_layout(host); 2376 2377 return ret; 2378 } 2379 2380 static const struct nand_controller_ops brcmnand_controller_ops = { 2381 .attach_chip = brcmnand_attach_chip, 2382 }; 2383 2384 static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn) 2385 { 2386 struct brcmnand_controller *ctrl = host->ctrl; 2387 struct platform_device *pdev = host->pdev; 2388 struct mtd_info *mtd; 2389 struct nand_chip *chip; 2390 int ret; 2391 u16 cfg_offs; 2392 2393 ret = of_property_read_u32(dn, "reg", &host->cs); 2394 if (ret) { 2395 dev_err(&pdev->dev, "can't get chip-select\n"); 2396 return -ENXIO; 2397 } 2398 2399 mtd = nand_to_mtd(&host->chip); 2400 chip = &host->chip; 2401 2402 nand_set_flash_node(chip, dn); 2403 nand_set_controller_data(chip, host); 2404 mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d", 2405 host->cs); 2406 if (!mtd->name) 2407 return -ENOMEM; 2408 2409 mtd->owner = THIS_MODULE; 2410 mtd->dev.parent = &pdev->dev; 2411 2412 chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl; 2413 chip->legacy.cmdfunc = brcmnand_cmdfunc; 2414 chip->legacy.waitfunc = brcmnand_waitfunc; 2415 chip->legacy.read_byte = brcmnand_read_byte; 2416 chip->legacy.read_buf = brcmnand_read_buf; 2417 chip->legacy.write_buf = brcmnand_write_buf; 2418 2419 chip->ecc.mode = NAND_ECC_HW; 2420 chip->ecc.read_page = brcmnand_read_page; 2421 chip->ecc.write_page = brcmnand_write_page; 2422 chip->ecc.read_page_raw = brcmnand_read_page_raw; 2423 chip->ecc.write_page_raw = brcmnand_write_page_raw; 2424 chip->ecc.write_oob_raw = brcmnand_write_oob_raw; 2425 chip->ecc.read_oob_raw = brcmnand_read_oob_raw; 2426 chip->ecc.read_oob = brcmnand_read_oob; 2427 chip->ecc.write_oob = brcmnand_write_oob; 2428 2429 chip->controller = &ctrl->controller; 2430 2431 /* 2432 * The bootloader might have configured 16bit mode but 2433 * NAND READID command only works in 8bit mode. We force 2434 * 8bit mode here to ensure that NAND READID commands works. 2435 */ 2436 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2437 nand_writereg(ctrl, cfg_offs, 2438 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH); 2439 2440 ret = nand_scan(chip, 1); 2441 if (ret) 2442 return ret; 2443 2444 ret = mtd_device_register(mtd, NULL, 0); 2445 if (ret) 2446 nand_cleanup(chip); 2447 2448 return ret; 2449 } 2450 2451 static void brcmnand_save_restore_cs_config(struct brcmnand_host *host, 2452 int restore) 2453 { 2454 struct brcmnand_controller *ctrl = host->ctrl; 2455 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); 2456 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, 2457 BRCMNAND_CS_CFG_EXT); 2458 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 2459 BRCMNAND_CS_ACC_CONTROL); 2460 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1); 2461 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2); 2462 2463 if (restore) { 2464 nand_writereg(ctrl, cfg_offs, host->hwcfg.config); 2465 if (cfg_offs != cfg_ext_offs) 2466 nand_writereg(ctrl, cfg_ext_offs, 2467 host->hwcfg.config_ext); 2468 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control); 2469 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1); 2470 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2); 2471 } else { 2472 host->hwcfg.config = nand_readreg(ctrl, cfg_offs); 2473 if (cfg_offs != cfg_ext_offs) 2474 host->hwcfg.config_ext = 2475 nand_readreg(ctrl, cfg_ext_offs); 2476 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs); 2477 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs); 2478 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs); 2479 } 2480 } 2481 2482 static int brcmnand_suspend(struct device *dev) 2483 { 2484 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); 2485 struct brcmnand_host *host; 2486 2487 list_for_each_entry(host, &ctrl->host_list, node) 2488 brcmnand_save_restore_cs_config(host, 0); 2489 2490 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT); 2491 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR); 2492 ctrl->corr_stat_threshold = 2493 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD); 2494 2495 if (has_flash_dma(ctrl)) 2496 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE); 2497 2498 return 0; 2499 } 2500 2501 static int brcmnand_resume(struct device *dev) 2502 { 2503 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); 2504 struct brcmnand_host *host; 2505 2506 if (has_flash_dma(ctrl)) { 2507 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode); 2508 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); 2509 } 2510 2511 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select); 2512 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor); 2513 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD, 2514 ctrl->corr_stat_threshold); 2515 if (ctrl->soc) { 2516 /* Clear/re-enable interrupt */ 2517 ctrl->soc->ctlrdy_ack(ctrl->soc); 2518 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); 2519 } 2520 2521 list_for_each_entry(host, &ctrl->host_list, node) { 2522 struct nand_chip *chip = &host->chip; 2523 2524 brcmnand_save_restore_cs_config(host, 1); 2525 2526 /* Reset the chip, required by some chips after power-up */ 2527 nand_reset_op(chip); 2528 } 2529 2530 return 0; 2531 } 2532 2533 const struct dev_pm_ops brcmnand_pm_ops = { 2534 .suspend = brcmnand_suspend, 2535 .resume = brcmnand_resume, 2536 }; 2537 EXPORT_SYMBOL_GPL(brcmnand_pm_ops); 2538 2539 static const struct of_device_id brcmnand_of_match[] = { 2540 { .compatible = "brcm,brcmnand-v4.0" }, 2541 { .compatible = "brcm,brcmnand-v5.0" }, 2542 { .compatible = "brcm,brcmnand-v6.0" }, 2543 { .compatible = "brcm,brcmnand-v6.1" }, 2544 { .compatible = "brcm,brcmnand-v6.2" }, 2545 { .compatible = "brcm,brcmnand-v7.0" }, 2546 { .compatible = "brcm,brcmnand-v7.1" }, 2547 { .compatible = "brcm,brcmnand-v7.2" }, 2548 { .compatible = "brcm,brcmnand-v7.3" }, 2549 {}, 2550 }; 2551 MODULE_DEVICE_TABLE(of, brcmnand_of_match); 2552 2553 /*********************************************************************** 2554 * Platform driver setup (per controller) 2555 ***********************************************************************/ 2556 2557 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) 2558 { 2559 struct device *dev = &pdev->dev; 2560 struct device_node *dn = dev->of_node, *child; 2561 struct brcmnand_controller *ctrl; 2562 struct resource *res; 2563 int ret; 2564 2565 /* We only support device-tree instantiation */ 2566 if (!dn) 2567 return -ENODEV; 2568 2569 if (!of_match_node(brcmnand_of_match, dn)) 2570 return -ENODEV; 2571 2572 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); 2573 if (!ctrl) 2574 return -ENOMEM; 2575 2576 dev_set_drvdata(dev, ctrl); 2577 ctrl->dev = dev; 2578 2579 init_completion(&ctrl->done); 2580 init_completion(&ctrl->dma_done); 2581 nand_controller_init(&ctrl->controller); 2582 ctrl->controller.ops = &brcmnand_controller_ops; 2583 INIT_LIST_HEAD(&ctrl->host_list); 2584 2585 /* NAND register range */ 2586 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2587 ctrl->nand_base = devm_ioremap_resource(dev, res); 2588 if (IS_ERR(ctrl->nand_base)) 2589 return PTR_ERR(ctrl->nand_base); 2590 2591 /* Enable clock before using NAND registers */ 2592 ctrl->clk = devm_clk_get(dev, "nand"); 2593 if (!IS_ERR(ctrl->clk)) { 2594 ret = clk_prepare_enable(ctrl->clk); 2595 if (ret) 2596 return ret; 2597 } else { 2598 ret = PTR_ERR(ctrl->clk); 2599 if (ret == -EPROBE_DEFER) 2600 return ret; 2601 2602 ctrl->clk = NULL; 2603 } 2604 2605 /* Initialize NAND revision */ 2606 ret = brcmnand_revision_init(ctrl); 2607 if (ret) 2608 goto err; 2609 2610 /* 2611 * Most chips have this cache at a fixed offset within 'nand' block. 2612 * Some must specify this region separately. 2613 */ 2614 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache"); 2615 if (res) { 2616 ctrl->nand_fc = devm_ioremap_resource(dev, res); 2617 if (IS_ERR(ctrl->nand_fc)) { 2618 ret = PTR_ERR(ctrl->nand_fc); 2619 goto err; 2620 } 2621 } else { 2622 ctrl->nand_fc = ctrl->nand_base + 2623 ctrl->reg_offsets[BRCMNAND_FC_BASE]; 2624 } 2625 2626 /* FLASH_DMA */ 2627 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma"); 2628 if (res) { 2629 ctrl->flash_dma_base = devm_ioremap_resource(dev, res); 2630 if (IS_ERR(ctrl->flash_dma_base)) { 2631 ret = PTR_ERR(ctrl->flash_dma_base); 2632 goto err; 2633 } 2634 2635 /* initialize the dma version */ 2636 brcmnand_flash_dma_revision_init(ctrl); 2637 2638 ret = -EIO; 2639 if (ctrl->nand_version >= 0x0700) 2640 ret = dma_set_mask_and_coherent(&pdev->dev, 2641 DMA_BIT_MASK(40)); 2642 if (ret) 2643 ret = dma_set_mask_and_coherent(&pdev->dev, 2644 DMA_BIT_MASK(32)); 2645 if (ret) 2646 goto err; 2647 2648 /* linked-list and stop on error */ 2649 flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK); 2650 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); 2651 2652 /* Allocate descriptor(s) */ 2653 ctrl->dma_desc = dmam_alloc_coherent(dev, 2654 sizeof(*ctrl->dma_desc), 2655 &ctrl->dma_pa, GFP_KERNEL); 2656 if (!ctrl->dma_desc) { 2657 ret = -ENOMEM; 2658 goto err; 2659 } 2660 2661 ctrl->dma_irq = platform_get_irq(pdev, 1); 2662 if ((int)ctrl->dma_irq < 0) { 2663 dev_err(dev, "missing FLASH_DMA IRQ\n"); 2664 ret = -ENODEV; 2665 goto err; 2666 } 2667 2668 ret = devm_request_irq(dev, ctrl->dma_irq, 2669 brcmnand_dma_irq, 0, DRV_NAME, 2670 ctrl); 2671 if (ret < 0) { 2672 dev_err(dev, "can't allocate IRQ %d: error %d\n", 2673 ctrl->dma_irq, ret); 2674 goto err; 2675 } 2676 2677 dev_info(dev, "enabling FLASH_DMA\n"); 2678 } 2679 2680 /* Disable automatic device ID config, direct addressing */ 2681 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, 2682 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0); 2683 /* Disable XOR addressing */ 2684 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0); 2685 2686 if (ctrl->features & BRCMNAND_HAS_WP) { 2687 /* Permanently disable write protection */ 2688 if (wp_on == 2) 2689 brcmnand_set_wp(ctrl, false); 2690 } else { 2691 wp_on = 0; 2692 } 2693 2694 /* IRQ */ 2695 ctrl->irq = platform_get_irq(pdev, 0); 2696 if ((int)ctrl->irq < 0) { 2697 dev_err(dev, "no IRQ defined\n"); 2698 ret = -ENODEV; 2699 goto err; 2700 } 2701 2702 /* 2703 * Some SoCs integrate this controller (e.g., its interrupt bits) in 2704 * interesting ways 2705 */ 2706 if (soc) { 2707 ctrl->soc = soc; 2708 2709 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0, 2710 DRV_NAME, ctrl); 2711 2712 /* Enable interrupt */ 2713 ctrl->soc->ctlrdy_ack(ctrl->soc); 2714 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); 2715 } else { 2716 /* Use standard interrupt infrastructure */ 2717 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0, 2718 DRV_NAME, ctrl); 2719 } 2720 if (ret < 0) { 2721 dev_err(dev, "can't allocate IRQ %d: error %d\n", 2722 ctrl->irq, ret); 2723 goto err; 2724 } 2725 2726 for_each_available_child_of_node(dn, child) { 2727 if (of_device_is_compatible(child, "brcm,nandcs")) { 2728 struct brcmnand_host *host; 2729 2730 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 2731 if (!host) { 2732 of_node_put(child); 2733 ret = -ENOMEM; 2734 goto err; 2735 } 2736 host->pdev = pdev; 2737 host->ctrl = ctrl; 2738 2739 ret = brcmnand_init_cs(host, child); 2740 if (ret) { 2741 devm_kfree(dev, host); 2742 continue; /* Try all chip-selects */ 2743 } 2744 2745 list_add_tail(&host->node, &ctrl->host_list); 2746 } 2747 } 2748 2749 /* No chip-selects could initialize properly */ 2750 if (list_empty(&ctrl->host_list)) { 2751 ret = -ENODEV; 2752 goto err; 2753 } 2754 2755 return 0; 2756 2757 err: 2758 clk_disable_unprepare(ctrl->clk); 2759 return ret; 2760 2761 } 2762 EXPORT_SYMBOL_GPL(brcmnand_probe); 2763 2764 int brcmnand_remove(struct platform_device *pdev) 2765 { 2766 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); 2767 struct brcmnand_host *host; 2768 2769 list_for_each_entry(host, &ctrl->host_list, node) 2770 nand_release(&host->chip); 2771 2772 clk_disable_unprepare(ctrl->clk); 2773 2774 dev_set_drvdata(&pdev->dev, NULL); 2775 2776 return 0; 2777 } 2778 EXPORT_SYMBOL_GPL(brcmnand_remove); 2779 2780 MODULE_LICENSE("GPL v2"); 2781 MODULE_AUTHOR("Kevin Cernekee"); 2782 MODULE_AUTHOR("Brian Norris"); 2783 MODULE_DESCRIPTION("NAND driver for Broadcom chips"); 2784 MODULE_ALIAS("platform:brcmnand"); 2785