// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD			0x00
#define MTK_NOR_CMD_WRITE		BIT(4)
#define MTK_NOR_CMD_PROGRAM		BIT(2)
#define MTK_NOR_CMD_READ		BIT(0)
#define MTK_NOR_CMD_MASK		GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT		0x04
#define MTK_NOR_PRG_CNT_MAX		56
#define MTK_NOR_REG_RDATA		0x0c

#define MTK_NOR_REG_RADR0		0x10
#define MTK_NOR_REG_RADR(n)		(MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3		0xc8

#define MTK_NOR_REG_WDATA		0x1c

#define MTK_NOR_REG_PRGDATA0		0x20
#define MTK_NOR_REG_PRGDATA(n)		(MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX		5

#define MTK_NOR_REG_SHIFT0		0x38
#define MTK_NOR_REG_SHIFT(n)		(MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX		9

#define MTK_NOR_REG_CFG1		0x60
#define MTK_NOR_FAST_READ		BIT(0)

#define MTK_NOR_REG_CFG2		0x64
#define MTK_NOR_WR_CUSTOM_OP_EN		BIT(4)
#define MTK_NOR_WR_BUF_EN		BIT(0)

#define MTK_NOR_REG_PP_DATA		0x98

#define MTK_NOR_REG_IRQ_STAT		0xa8
#define MTK_NOR_REG_IRQ_EN		0xac
#define MTK_NOR_IRQ_DMA			BIT(7)
#define MTK_NOR_IRQ_MASK		GENMASK(7, 0)

#define MTK_NOR_REG_CFG3		0xb4
#define MTK_NOR_DISABLE_WREN		BIT(7)
#define MTK_NOR_DISABLE_SR_POLL		BIT(5)

#define MTK_NOR_REG_WP			0xc4
#define MTK_NOR_ENABLE_SF_CMD		0x30

#define MTK_NOR_REG_BUSCFG		0xcc
#define MTK_NOR_4B_ADDR			BIT(4)
#define MTK_NOR_QUAD_ADDR		BIT(3)
#define MTK_NOR_QUAD_READ		BIT(2)
#define MTK_NOR_DUAL_ADDR		BIT(1)
#define MTK_NOR_DUAL_READ		BIT(0)
#define MTK_NOR_BUS_MODE_MASK		GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL		0x718
#define MTK_NOR_DMA_START		BIT(0)

#define MTK_NOR_REG_DMA_FADR		0x71c
#define MTK_NOR_REG_DMA_DADR		0x720
#define MTK_NOR_REG_DMA_END_DADR	0x724
#define MTK_NOR_REG_DMA_DADR_HB		0x738
#define MTK_NOR_REG_DMA_END_DADR_HB	0x73c

#define MTK_NOR_PRG_MAX_SIZE		6
// Reading DMA src/dst addresses have to be 16-byte aligned
#define MTK_NOR_DMA_ALIGN		16
#define MTK_NOR_DMA_ALIGN_MASK		(MTK_NOR_DMA_ALIGN - 1)
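// Layout notes, summarized from how the registers are used in this
// driver rather than from a datasheet: PRGDATA0..PRGDATA5 stage TX
// bytes for the "prg" shift engine, SHIFT0..SHIFT9 latch RX bytes, and
// PRG_CNT holds the transaction length in bit clocks. With
// MTK_NOR_PRG_CNT_MAX = 56 bits, one prg transaction moves at most
// 56 / 8 = 7 bytes on the wire.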
// We allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE		PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE			128

#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)

struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	dma_addr_t buffer_dma;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	struct clk *axi_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	bool high_dma;
	struct completion op_done;
};

static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}

static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}

static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}

static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.buswidth)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}
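// Worked example for the dummy-clock check above (illustrative only;
// the opcodes are conventional SPI NOR commands, not values this driver
// defines): a 1-1-4 quad-output read such as 0x6b typically carries one
// dummy byte at buswidth 1, i.e. 1 * 8 / 1 == 8 clocks, matching the
// addr.buswidth == 1 case; a 1-4-4 read with three dummy bytes at
// buswidth 4 gives 3 * 8 / 4 == 6 clocks, matching addr.buswidth == 4.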
static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
	int tx_len, rx_len, prg_len, prg_left;

	// prg mode is spi-only.
	if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
	    (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
		return false;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		// count dummy bytes only if we need to write data after it
		tx_len += op->dummy.nbytes;

		// leave at least one byte for data
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
			return false;

		// if there's no addr, meaning adjust_op_size is impossible,
		// check data length as well.
		if ((!op->addr.nbytes) &&
		    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
			return false;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
			return false;

		rx_len = op->data.nbytes;
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (rx_len > prg_left) {
			if (!op->addr.nbytes)
				return false;
			rx_len = prg_left;
		}

		prg_len = tx_len + op->dummy.nbytes + rx_len;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	} else {
		prg_len = tx_len + op->dummy.nbytes;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	}
	return true;
}

static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
	int tx_len, tx_left, prg_left;

	tx_len = op->cmd.nbytes + op->addr.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		tx_len += op->dummy.nbytes;
		tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
		if (op->data.nbytes > tx_left)
			op->data.nbytes = tx_left;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (op->data.nbytes > prg_left)
			op->data.nbytes = prg_left;
	}
}

static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!need_bounce(sp, op))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	mtk_nor_adj_prg_size(op);
	return 0;
}

static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.buswidth != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		switch (op->data.dir) {
		case SPI_MEM_DATA_IN:
			if (mtk_nor_match_read(op))
				return true;
			break;
		case SPI_MEM_DATA_OUT:
			if ((op->addr.buswidth == 1) &&
			    (op->dummy.nbytes == 0) &&
			    (op->data.buswidth == 1))
				return true;
			break;
		default:
			break;
		}
	}

	return mtk_nor_match_prg(op);
}
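// Ops that match neither dedicated path above fall back to the prg
// shift engine. A typical example (0x9f is the conventional JEDEC RDID
// opcode, named here only for illustration) is a register read: one
// command byte, no address, three data bytes in, well within the
// 7-byte prg budget checked by mtk_nor_match_prg().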
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
			    dma_addr_t dma_addr)
{
	int ret = 0;
	ulong delay;
	u32 reg;

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->high_dma) {
		writel(upper_32_bits(dma_addr),
		       sp->base + MTK_NOR_REG_DMA_DADR_HB);
		writel(upper_32_bits(dma_addr + length),
		       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
	}

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 (delay + 1) * 100))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 (delay + 1) * 100);
	}

	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}

static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	unsigned int rdlen;
	int ret;

	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = op->data.nbytes;

	ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);

	if (!ret)
		memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);

	return ret;
}

static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(sp, op))
		return mtk_nor_read_bounce(sp, op);

	dma_addr = dma_map_single(sp->dev, op->data.buf.in,
				  op->data.nbytes, DMA_FROM_DEVICE);

	if (dma_mapping_error(sp->dev, dma_addr))
		return -EINVAL;

	ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

	dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}

static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}
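// Read-path summary with a worked example (values illustrative,
// following the alignment rules above): single-byte reads use PIO,
// aligned user buffers are DMA-mapped directly, and a misaligned
// destination is served from the bounce buffer with the DMA length
// rounded up, e.g. a 100-byte read issues
// rdlen = (100 + 16) & ~15 = 112 bytes and copies out the first 100.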
static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	if (!ret)
		sp->wbuf_en = true;
	return ret;
}

static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (!sp->wbuf_en)
		return 0;
	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	if (!ret)
		sp->wbuf_en = false;
	return ret;
}

static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_write_buffer_enable(sp);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_write_buffer_disable(sp);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}
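// Byte-packing example for the buffered page program above (values
// purely illustrative): with buf[0..3] = {0x12, 0x34, 0x56, 0x78} the
// word written to MTK_NOR_REG_PP_DATA is 0x78563412, i.e. buf[0] lands
// in the least significant byte, so the controller shifts the bytes
// out in their original order.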
static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int rx_len = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	int tx_len, prg_len;
	int i, ret;
	void __iomem *reg;
	u8 bufbyte;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	// count dummy bytes only if we need to write data after it
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_len += op->dummy.nbytes + op->data.nbytes;
	else if (op->data.dir == SPI_MEM_DATA_IN)
		rx_len = op->data.nbytes;

	prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
		  op->data.nbytes;

	// an invalid op may reach here if the caller calls exec_op without
	// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
	// spi-mem won't try this op again with generic spi transfers.
	if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
	    (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
	    (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
		return -EINVAL;

	// fill tx data
	for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(0, reg);
		}

		for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(((const u8 *)(op->data.buf.out))[i], reg);
		}
	}

	for (; reg_offset >= 0; reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		writeb(0, reg);
	}

	// trigger op
	writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
			       prg_len * BITS_PER_BYTE);
	if (ret)
		return ret;

	// fetch read data
	reg_offset = 0;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			((u8 *)(op->data.buf.in))[i] = readb(reg);
		}
	}

	return 0;
}

static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return mtk_nor_spi_mem_prg(sp, op);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_write_buffer_disable(sp);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		} else {
			return mtk_nor_read_dma(sp, op);
		}
	}

	return mtk_nor_spi_mem_prg(sp, op);
}

static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->master);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}
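// The generic transfer path below reuses the prg shift engine, so a
// whole spi_message is capped at MTK_NOR_PRG_MAX_SIZE (6) bytes via
// mtk_max_msg_size(): TX bytes are staged from PRGDATA5 downwards and
// RX bytes are read back from the SHIFT registers, highest index first.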
static int mtk_nor_transfer_one_message(struct spi_controller *master,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(master);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(master);

	return 0;
}

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
	clk_disable_unprepare(sp->axi_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		return ret;
	}

	return 0;
}

static void mtk_nor_init(struct mtk_nor *sp)
{
	writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}

static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};

static const struct of_device_id mtk_nor_match[] = {
	{ .compatible = "mediatek,mt8192-nor", .data = (void *)36 },
	{ .compatible = "mediatek,mt8173-nor", .data = (void *)32 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);
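// The per-SoC match data above encodes the supported DMA address width:
// 36 bits on mt8192, where the high bits are programmed through the
// MTK_NOR_REG_DMA_*_HB registers (sp->high_dma), and 32 bits on mt8173.
// mtk_nor_probe() below turns this value into the device's DMA mask.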
static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	void __iomem *base;
	struct clk *spi_clk, *ctlr_clk, *axi_clk;
	int ret, irq;
	unsigned long dma_bits;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
	if (IS_ERR(axi_clk))
		return PTR_ERR(axi_clk);

	dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
		dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
		return -EINVAL;
	}

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;
	ctlr->auto_runtime_pm = true;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;
	sp->axi_clk = axi_clk;
	sp->high_dma = (dma_bits > 32);
	sp->buffer = dmam_alloc_coherent(&pdev->dev,
				MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
				&sp->buffer_dma, GFP_KERNEL);
	if (!sp->buffer)
		return -ENOMEM;

	if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
		dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
		return -ENOMEM;
	}

	ret = mtk_nor_enable_clk(sp);
	if (ret < 0)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	mtk_nor_init(sp);

	irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.");
	} else {
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0)
		goto err_probe;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);

	return 0;

err_probe:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return ret;
}

static int mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	return mtk_nor_enable_clk(sp);
}
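// Runtime PM here only gates the three clocks: runtime suspend calls
// mtk_nor_disable_clk() and resume re-enables them, while the system
// sleep hooks below just force the runtime-PM state. Note the negative
// autosuspend delay set in probe() prevents autosuspend until a PM
// policy (e.g. via sysfs) changes it.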
static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops mtk_nor_pm_ops = {
	SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
			   mtk_nor_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};

static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
		.pm = &mtk_nor_pm_ops,
	},
	.probe = mtk_nor_probe,
	.remove = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);