// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD			0x00
#define MTK_NOR_CMD_WRITE		BIT(4)
#define MTK_NOR_CMD_PROGRAM		BIT(2)
#define MTK_NOR_CMD_READ		BIT(0)
#define MTK_NOR_CMD_MASK		GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT		0x04
#define MTK_NOR_REG_RDATA		0x0c

#define MTK_NOR_REG_RADR0		0x10
#define MTK_NOR_REG_RADR(n)		(MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3		0xc8

#define MTK_NOR_REG_WDATA		0x1c

#define MTK_NOR_REG_PRGDATA0		0x20
#define MTK_NOR_REG_PRGDATA(n)		(MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX		5

#define MTK_NOR_REG_SHIFT0		0x38
#define MTK_NOR_REG_SHIFT(n)		(MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX		9

#define MTK_NOR_REG_CFG1		0x60
#define MTK_NOR_FAST_READ		BIT(0)

#define MTK_NOR_REG_CFG2		0x64
#define MTK_NOR_WR_CUSTOM_OP_EN		BIT(4)
#define MTK_NOR_WR_BUF_EN		BIT(0)

#define MTK_NOR_REG_PP_DATA		0x98

#define MTK_NOR_REG_IRQ_STAT		0xa8
#define MTK_NOR_REG_IRQ_EN		0xac
#define MTK_NOR_IRQ_DMA			BIT(7)
#define MTK_NOR_IRQ_MASK		GENMASK(7, 0)

#define MTK_NOR_REG_CFG3		0xb4
#define MTK_NOR_DISABLE_WREN		BIT(7)
#define MTK_NOR_DISABLE_SR_POLL		BIT(5)

#define MTK_NOR_REG_WP			0xc4
#define MTK_NOR_ENABLE_SF_CMD		0x30

#define MTK_NOR_REG_BUSCFG		0xcc
#define MTK_NOR_4B_ADDR			BIT(4)
#define MTK_NOR_QUAD_ADDR		BIT(3)
#define MTK_NOR_QUAD_READ		BIT(2)
#define MTK_NOR_DUAL_ADDR		BIT(1)
#define MTK_NOR_DUAL_READ		BIT(0)
#define MTK_NOR_BUS_MODE_MASK		GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL		0x718
#define MTK_NOR_DMA_START		BIT(0)

#define MTK_NOR_REG_DMA_FADR		0x71c
#define MTK_NOR_REG_DMA_DADR		0x720
#define MTK_NOR_REG_DMA_END_DADR	0x724

#define MTK_NOR_PRG_MAX_SIZE		6
// Reading DMA src/dst addresses have to be 16-byte aligned,
// and we allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_DMA_ALIGN		16
#define MTK_NOR_DMA_ALIGN_MASK		(MTK_NOR_DMA_ALIGN - 1)
#define MTK_NOR_BOUNCE_BUF_SIZE		PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE			128
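
/*
 * Convert a transfer length in SPI clock cycles into a microsecond
 * estimate used for timeout budgets. For example, at a 26 MHz spi_freq
 * (an assumed rate, for illustration only), a 6-byte command sequence is
 * 48 clocks and CLK_TO_US() yields 48 * 1000000 / 26000000 ~= 1 us;
 * callers scale this up to get a safe polling deadline.
 */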
#define CLK_TO_US(sp, clkcnt)		((clkcnt) * 1000000 / (sp)->spi_freq)

struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	struct completion op_done;
};

static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}

static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}

static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.buswidth)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}

static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	size_t len;

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// unaligned flash addresses and short reads fall
			// back to single-byte PIO
			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			// aligned destination buffer: trim to a whole
			// number of DMA-aligned chunks
			else if (!((ulong)(op->data.buf.in) &
				   MTK_NOR_DMA_ALIGN_MASK))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			// unaligned destination buffer: cap at what the
			// bounce buffer can hold
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			// either one full buffered page program or a
			// single-byte write
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	len = MTK_NOR_PRG_MAX_SIZE - op->cmd.nbytes - op->addr.nbytes -
	      op->dummy.nbytes;
	if (op->data.nbytes > len)
		op->data.nbytes = len;

	return 0;
}
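
/*
 * A sketch of the op forms the hooks below accept (summarizing the logic
 * above, not taken from a datasheet): reads that look like standard
 * 1-1-1/1-1-2/1-1-4/1-2-2/1-4-4 flash reads with the expected dummy
 * clocks (e.g. a quad-output read 0x6b with a 1-line address and 8 dummy
 * clocks) go through the read engine; page programs go through the
 * 128-byte program buffer; everything else must fit in the 6-byte PRG
 * shift registers.
 */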
static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	size_t len;

	if (op->cmd.buswidth != 1)
		return false;

	/* DTR ops not supported. */
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;
	if (op->cmd.nbytes != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
			return true;
		else if (op->data.dir == SPI_MEM_DATA_OUT)
			return (op->addr.buswidth == 1) &&
			       (op->dummy.buswidth == 0) &&
			       (op->data.buswidth == 1);
	}
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if ((len > MTK_NOR_PRG_MAX_SIZE) ||
	    ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
		return false;
	return true;
}

static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
			    u8 *buffer)
{
	int ret = 0;
	ulong delay;
	u32 reg;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
	if (dma_mapping_error(sp->dev, dma_addr)) {
		dev_err(sp->dev, "failed to map dma buffer.\n");
		return -EINVAL;
	}

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 (delay + 1) * 100))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 (delay + 1) * 100);
	}

	dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}

static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
			       unsigned int length, u8 *buffer)
{
	unsigned int rdlen;
	int ret;

	if (length & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = length;

	ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
	if (ret)
		return ret;

	memcpy(buffer, sp->buffer, length);
	return 0;
}

static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}
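
/*
 * The controller has an internal 128-byte program buffer. The two
 * helpers below toggle MTK_NOR_WR_BUF_EN and poll CFG2 until the
 * hardware reflects the new state; sp->wbuf_en caches that state so
 * back-to-back page programs don't touch the register needlessly.
 */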
static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	if (!ret)
		sp->wbuf_en = true;
	return ret;
}

static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (!sp->wbuf_en)
		return 0;
	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	if (!ret)
		sp->wbuf_en = false;
	return ret;
}

static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_write_buffer_enable(sp);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_write_buffer_disable(sp);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}

static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return -ENOTSUPP;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_write_buffer_disable(sp);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		} else if (((ulong)(op->data.buf.in) &
			    MTK_NOR_DMA_ALIGN_MASK)) {
			return mtk_nor_read_bounce(sp, op->addr.val,
						   op->data.nbytes,
						   op->data.buf.in);
		} else {
			return mtk_nor_read_dma(sp, op->addr.val,
						op->data.nbytes,
						op->data.buf.in);
		}
	}

	return -ENOTSUPP;
}

static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->master);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}
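
/*
 * Generic (non spi-mem) messages are shifted through the PRG engine:
 * TX bytes are loaded into the PRGDATA registers from PRGDATA_MAX
 * downwards, the total bit count goes into PRG_CNT, and once
 * MTK_NOR_CMD_PROGRAM completes the RX bytes are read back from the
 * SHIFT registers in the same descending order. This caps a whole
 * message at MTK_NOR_PRG_MAX_SIZE bytes, which mtk_max_msg_size()
 * advertises to the SPI core.
 */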
static int mtk_nor_transfer_one_message(struct spi_controller *master,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(master);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(master);

	return 0;
}

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	return 0;
}

static int mtk_nor_init(struct mtk_nor *sp)
{
	int ret;

	ret = mtk_nor_enable_clk(sp);
	if (ret)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);

	return ret;
}

static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};

static const struct of_device_id mtk_nor_match[] = {
	{ .compatible = "mediatek,mt8173-nor" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);

static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	void __iomem *base;
	u8 *buffer;
	struct clk *spi_clk, *ctlr_clk;
	int ret, irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	buffer = devm_kmalloc(&pdev->dev,
			      MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
			      GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
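
	/*
	 * devm_kmalloc() doesn't guarantee the 16-byte alignment the DMA
	 * engine needs, so the bounce buffer is over-allocated by
	 * MTK_NOR_DMA_ALIGN bytes and the pointer is rounded up to the
	 * next aligned boundary below.
	 */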
	if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
		buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
				~MTK_NOR_DMA_ALIGN_MASK);

	ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->buffer = buffer;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.\n");
	} else {
		writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT);
		writel(0, base + MTK_NOR_REG_IRQ_EN);
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.\n");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	ret = mtk_nor_init(sp);
	if (ret < 0) {
		// drop the reference taken by spi_alloc_master() rather
		// than freeing the refcounted controller with kfree()
		spi_controller_put(ctlr);
		return ret;
	}

	dev_info(&pdev->dev, "spi frequency: %u Hz\n", sp->spi_freq);

	return devm_spi_register_controller(&pdev->dev, ctlr);
}

static int mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;

	ctlr = dev_get_drvdata(&pdev->dev);
	sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
	},
	.probe = mtk_nor_probe,
	.remove = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
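
/*
 * Illustrative device-tree usage (a sketch; the node name, addresses and
 * clock phandles below are made up for the example, while the compatible
 * string and the "spi"/"sf" clock names come from this driver):
 *
 *	nor_flash: spi@1100d000 {
 *		compatible = "mediatek,mt8173-nor";
 *		reg = <0 0x1100d000 0 0xe0>;
 *		clocks = <&pericfg CLK_PERI_SPI>,
 *			 <&topckgen CLK_TOP_SPINFI_IFR_SEL>;
 *		clock-names = "spi", "sf";
 *	};
 */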