// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD			0x00
#define MTK_NOR_CMD_WRITE		BIT(4)
#define MTK_NOR_CMD_PROGRAM		BIT(2)
#define MTK_NOR_CMD_READ		BIT(0)
#define MTK_NOR_CMD_MASK		GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT		0x04
#define MTK_NOR_PRG_CNT_MAX		56
#define MTK_NOR_REG_RDATA		0x0c

#define MTK_NOR_REG_RADR0		0x10
#define MTK_NOR_REG_RADR(n)		(MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3		0xc8

#define MTK_NOR_REG_WDATA		0x1c

#define MTK_NOR_REG_PRGDATA0		0x20
#define MTK_NOR_REG_PRGDATA(n)		(MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX		5

#define MTK_NOR_REG_SHIFT0		0x38
#define MTK_NOR_REG_SHIFT(n)		(MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX		9

#define MTK_NOR_REG_CFG1		0x60
#define MTK_NOR_FAST_READ		BIT(0)

#define MTK_NOR_REG_CFG2		0x64
#define MTK_NOR_WR_CUSTOM_OP_EN		BIT(4)
#define MTK_NOR_WR_BUF_EN		BIT(0)

#define MTK_NOR_REG_PP_DATA		0x98

#define MTK_NOR_REG_IRQ_STAT		0xa8
#define MTK_NOR_REG_IRQ_EN		0xac
#define MTK_NOR_IRQ_DMA			BIT(7)
#define MTK_NOR_IRQ_MASK		GENMASK(7, 0)

#define MTK_NOR_REG_CFG3		0xb4
#define MTK_NOR_DISABLE_WREN		BIT(7)
#define MTK_NOR_DISABLE_SR_POLL		BIT(5)

#define MTK_NOR_REG_WP			0xc4
#define MTK_NOR_ENABLE_SF_CMD		0x30

#define MTK_NOR_REG_BUSCFG		0xcc
#define MTK_NOR_4B_ADDR			BIT(4)
#define MTK_NOR_QUAD_ADDR		BIT(3)
#define MTK_NOR_QUAD_READ		BIT(2)
#define MTK_NOR_DUAL_ADDR		BIT(1)
#define MTK_NOR_DUAL_READ		BIT(0)
#define MTK_NOR_BUS_MODE_MASK		GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL		0x718
#define MTK_NOR_DMA_START		BIT(0)

#define MTK_NOR_REG_DMA_FADR		0x71c
#define MTK_NOR_REG_DMA_DADR		0x720
#define MTK_NOR_REG_DMA_END_DADR	0x724
#define MTK_NOR_REG_CG_DIS		0x728
#define MTK_NOR_SFC_SW_RST		BIT(2)

#define MTK_NOR_REG_DMA_DADR_HB		0x738
#define MTK_NOR_REG_DMA_END_DADR_HB	0x73c

#define MTK_NOR_PRG_MAX_SIZE		6
// DMA read source/destination addresses have to be 16-byte aligned,
#define MTK_NOR_DMA_ALIGN		16
#define MTK_NOR_DMA_ALIGN_MASK		(MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE		PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE			128

#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
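
/*
 * A worked example of the timeout math above (illustrative numbers only):
 * with a 26 MHz spi_clk, sp->spi_freq / 1000000 is 26 clocks per
 * microsecond, so CLK_TO_US(sp, 48) = DIV_ROUND_UP(48, 26) = 2 us. The
 * round-up keeps the computed delay nonzero for any nonzero clock count;
 * note the macro implicitly assumes spi_freq is at least 1 MHz, or the
 * divisor would be zero.
 */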

struct mtk_nor_caps {
	u8 dma_bits;

	/*
	 * extra_dummy_bit is added for the IP in newer SoCs. Some newer
	 * SoCs modify the timing of fetching register values and NOR
	 * flash IDs; they need an extra dummy bit, which adds more clock
	 * cycles for fetching data.
	 */
	u8 extra_dummy_bit;
};

struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	dma_addr_t buffer_dma;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	struct clk *axi_clk;
	struct clk *axi_s_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	bool high_dma;
	struct completion op_done;
	const struct mtk_nor_caps *caps;
};

static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}

static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}

static void mtk_nor_reset(struct mtk_nor *sp)
{
	mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, 0, MTK_NOR_SFC_SW_RST);
	mb(); /* flush previous writes */
	mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, MTK_NOR_SFC_SW_RST, 0);
	mb(); /* flush previous writes */
	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
}

static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}

static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.nbytes)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}
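
/*
 * An illustrative spi-mem op that mtk_nor_match_read() accepts (example
 * values only, not used by the driver): a quad-output fast read (0x6b)
 * with one dummy byte on a 1-bit bus works out to 8 dummy clocks:
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *					  SPI_MEM_OP_ADDR(3, addr, 1),
 *					  SPI_MEM_OP_DUMMY(1, 1),
 *					  SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 * Here addr.buswidth == 1 and dummy == 1 * 8 / 1 == 8, so the op can use
 * the controller's native read path instead of falling back to PRG mode.
 */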

static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
	int tx_len, rx_len, prg_len, prg_left;

	// prg mode is spi-only.
	if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
	    (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
		return false;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		// count dummy bytes only if we need to write data after them
		tx_len += op->dummy.nbytes;

		// leave at least one byte for data
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
			return false;

		// if there's no address, adjust_op_size can't shrink the op,
		// so check the data length as well.
		if ((!op->addr.nbytes) &&
		    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
			return false;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
			return false;

		rx_len = op->data.nbytes;
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (rx_len > prg_left) {
			if (!op->addr.nbytes)
				return false;
			rx_len = prg_left;
		}

		prg_len = tx_len + op->dummy.nbytes + rx_len;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	} else {
		prg_len = tx_len + op->dummy.nbytes;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	}
	return true;
}

static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
	int tx_len, tx_left, prg_left;

	tx_len = op->cmd.nbytes + op->addr.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		tx_len += op->dummy.nbytes;
		tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
		if (op->data.nbytes > tx_left)
			op->data.nbytes = tx_left;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (op->data.nbytes > prg_left)
			op->data.nbytes = prg_left;
	}
}

static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!need_bounce(sp, op))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	mtk_nor_adj_prg_size(op);
	return 0;
}
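
/*
 * Worked examples of the read-size clamping above (illustrative numbers):
 * for a DMA-able read with an aligned flash address and an aligned
 * destination buffer, a 4100-byte request is trimmed to 4100 & ~15 = 4096
 * bytes. With an unaligned destination buffer the bounce path is used
 * instead and the request is capped at MTK_NOR_BOUNCE_BUF_SIZE. An
 * unaligned flash address, or a read shorter than 16 bytes, degrades to a
 * single-byte PIO read; the spi-mem caller is expected to iterate until
 * the whole request has been transferred.
 */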

static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.buswidth != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		switch (op->data.dir) {
		case SPI_MEM_DATA_IN:
			if (mtk_nor_match_read(op))
				return true;
			break;
		case SPI_MEM_DATA_OUT:
			if ((op->addr.buswidth == 1) &&
			    (op->dummy.nbytes == 0) &&
			    (op->data.buswidth == 1))
				return true;
			break;
		default:
			break;
		}
	}

	return mtk_nor_match_prg(op);
}

static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
			    dma_addr_t dma_addr)
{
	int ret = 0;
	u32 delay, timeout;
	u32 reg;

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->high_dma) {
		writel(upper_32_bits(dma_addr),
		       sp->base + MTK_NOR_REG_DMA_DADR_HB);
		writel(upper_32_bits(dma_addr + length),
		       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
	}

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
	timeout = (delay + 1) * 100;

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 usecs_to_jiffies(max(timeout, 10000U))))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 timeout);
	}

	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}

static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	unsigned int rdlen;
	int ret;

	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = op->data.nbytes;

	ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);

	if (!ret)
		memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);

	return ret;
}
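
/*
 * A quick sanity check on the rounding above (illustrative numbers): for a
 * 100-byte read, rdlen = (100 + 16) & ~15 = 112, so the controller DMAs
 * 112 bytes into the 16-byte-aligned bounce buffer and only the 100
 * requested bytes are copied out. The over-read stays within the buffer
 * because adjust_op_size already capped nbytes at MTK_NOR_BOUNCE_BUF_SIZE
 * and the allocation adds MTK_NOR_DMA_ALIGN bytes of slack.
 */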

static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(sp, op))
		return mtk_nor_read_bounce(sp, op);

	dma_addr = dma_map_single(sp->dev, op->data.buf.in,
				  op->data.nbytes, DMA_FROM_DEVICE);

	if (dma_mapping_error(sp->dev, dma_addr))
		return -EINVAL;

	ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

	dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}

static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}

static int mtk_nor_setup_write_buffer(struct mtk_nor *sp, bool on)
{
	int ret;
	u32 val;

	if (!(sp->wbuf_en ^ on))
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	if (on) {
		writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
					 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	} else {
		writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
					 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	}

	if (!ret)
		sp->wbuf_en = on;

	return ret;
}

static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_setup_write_buffer(sp, true);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_setup_write_buffer(sp, false);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}

static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int rx_len = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	int tx_len, prg_len;
	int i, ret;
	void __iomem *reg;
	u8 bufbyte;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	// count dummy bytes only if we need to write data after them
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_len += op->dummy.nbytes + op->data.nbytes;
	else if (op->data.dir == SPI_MEM_DATA_IN)
		rx_len = op->data.nbytes;

	prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
		  op->data.nbytes;

	// an invalid op may reach here if the caller calls exec_op without
	// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
	// spi-mem won't try this op again with generic spi transfers.
	if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
	    (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
	    (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
		return -EINVAL;

	// fill tx data
	for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(0, reg);
		}

		for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(((const u8 *)(op->data.buf.out))[i], reg);
		}
	}

	for (; reg_offset >= 0; reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		writeb(0, reg);
	}

	// trigger op
	if (rx_len)
		writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit,
		       sp->base + MTK_NOR_REG_PRG_CNT);
	else
		writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
			       prg_len * BITS_PER_BYTE);
	if (ret)
		return ret;

	// fetch read data
	reg_offset = 0;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			((u8 *)(op->data.buf.in))[i] = readb(reg);
		}
	}

	return 0;
}
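
/*
 * An illustrative walk-through of the PRG path (example values only): a
 * Read Status Register op (opcode 0x05, no address, no dummy, one data
 * byte in) has tx_len = 1, rx_len = 1 and prg_len = 2. The opcode lands
 * in PRGDATA5, PRGDATA4..PRGDATA0 are zero-filled, PRG_CNT is programmed
 * with 16 bit-times (plus extra_dummy_bit on SoCs that need it), and
 * after the command completes the status byte is fetched from SHIFT0.
 */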

static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return mtk_nor_spi_mem_prg(sp, op);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_setup_write_buffer(sp, false);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		} else {
			ret = mtk_nor_read_dma(sp, op);
			if (unlikely(ret)) {
				/* Handle rare bus glitch */
				mtk_nor_reset(sp);
				mtk_nor_setup_bus(sp, op);
				return mtk_nor_read_dma(sp, op);
			}

			return ret;
		}
	}

	return mtk_nor_spi_mem_prg(sp, op);
}

static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->master);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}

static int mtk_nor_transfer_one_message(struct spi_controller *master,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(master);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(master);

	return 0;
}
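
/*
 * Illustrative example of the generic-transfer fallback above: with
 * max_message_size capped at MTK_NOR_PRG_MAX_SIZE (6), a message made of
 * a 1-byte tx transfer (e.g. opcode 0x9f) followed by a 2-byte rx
 * transfer shifts out PRGDATA5..PRGDATA3 over 24 bit-times; byte i of the
 * message is then available in SHIFT(trx_len - 1 - i), so the two rx
 * bytes are read back from SHIFT1 and SHIFT0.
 */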

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
	clk_disable_unprepare(sp->axi_clk);
	clk_disable_unprepare(sp->axi_s_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_s_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		clk_disable_unprepare(sp->axi_clk);
		return ret;
	}

	return 0;
}

static void mtk_nor_init(struct mtk_nor *sp)
{
	writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}

static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};

static const struct mtk_nor_caps mtk_nor_caps_mt8173 = {
	.dma_bits = 32,
	.extra_dummy_bit = 0,
};

static const struct mtk_nor_caps mtk_nor_caps_mt8186 = {
	.dma_bits = 32,
	.extra_dummy_bit = 1,
};

static const struct mtk_nor_caps mtk_nor_caps_mt8192 = {
	.dma_bits = 36,
	.extra_dummy_bit = 0,
};

static const struct of_device_id mtk_nor_match[] = {
	{ .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 },
	{ .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 },
	{ .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);
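
/*
 * How the caps map to behavior: dma_bits > 32 sets sp->high_dma in probe,
 * which makes mtk_nor_dma_exec() program the *_HB registers with the
 * upper address bits, and extra_dummy_bit widens PRG_CNT on PRG-mode
 * reads. A hypothetical new SoC needing both would add an entry such as:
 *
 *	static const struct mtk_nor_caps mtk_nor_caps_mtxxxx = {
 *		.dma_bits = 36,
 *		.extra_dummy_bit = 1,
 *	};
 *
 * plus a matching "mediatek,mtxxxx-nor" row in mtk_nor_match[] (SoC name
 * made up for illustration).
 */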

static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	struct mtk_nor_caps *caps;
	void __iomem *base;
	struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk;
	int ret, irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
	if (IS_ERR(axi_clk))
		return PTR_ERR(axi_clk);

	axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s");
	if (IS_ERR(axi_s_clk))
		return PTR_ERR(axi_s_clk);

	caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits));
	if (ret) {
		dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits);
		return ret;
	}

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;
	ctlr->auto_runtime_pm = true;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;
	sp->axi_clk = axi_clk;
	sp->axi_s_clk = axi_s_clk;
	sp->caps = caps;
	sp->high_dma = caps->dma_bits > 32;
	sp->buffer = dmam_alloc_coherent(&pdev->dev,
					 MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
					 &sp->buffer_dma, GFP_KERNEL);
	if (!sp->buffer)
		return -ENOMEM;

	if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
		dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
		return -ENOMEM;
	}

	ret = mtk_nor_enable_clk(sp);
	if (ret < 0)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	mtk_nor_init(sp);

	irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.\n");
	} else {
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.\n");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0)
		goto err_probe;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&pdev->dev, "spi frequency: %u Hz\n", sp->spi_freq);

	return 0;

err_probe:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return ret;
}

static int mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	return mtk_nor_enable_clk(sp);
}

static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	mtk_nor_init(sp);

	return 0;
}
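
/*
 * Note on the split above: runtime PM only gates the clocks, so runtime
 * resume just re-enables them without reprogramming anything. System
 * resume, in contrast, calls mtk_nor_init() because register contents
 * may be lost across a deeper suspend state.
 */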

static const struct dev_pm_ops mtk_nor_pm_ops = {
	SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
			   mtk_nor_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};

static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
		.pm = &mtk_nor_pm_ops,
	},
	.probe = mtk_nor_probe,
	.remove = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);