// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2018 MediaTek Inc.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPIS_IRQ_EN_REG		0x0
#define SPIS_IRQ_CLR_REG	0x4
#define SPIS_IRQ_ST_REG		0x8
#define SPIS_IRQ_MASK_REG	0xc
#define SPIS_CFG_REG		0x10
#define SPIS_RX_DATA_REG	0x14
#define SPIS_TX_DATA_REG	0x18
#define SPIS_RX_DST_REG		0x1c
#define SPIS_TX_SRC_REG		0x20
#define SPIS_DMA_CFG_REG	0x30
#define SPIS_SOFT_RST_REG	0x40

/* SPIS_IRQ_EN_REG */
#define DMA_DONE_EN		BIT(7)
#define DATA_DONE_EN		BIT(2)
#define RSTA_DONE_EN		BIT(1)
#define CMD_INVALID_EN		BIT(0)

/* SPIS_IRQ_ST_REG */
#define DMA_DONE_ST		BIT(7)
#define DATA_DONE_ST		BIT(2)
#define RSTA_DONE_ST		BIT(1)
#define CMD_INVALID_ST		BIT(0)

/* SPIS_IRQ_MASK_REG */
#define DMA_DONE_MASK		BIT(7)
#define DATA_DONE_MASK		BIT(2)
#define RSTA_DONE_MASK		BIT(1)
#define CMD_INVALID_MASK	BIT(0)

/* SPIS_CFG_REG */
#define SPIS_TX_ENDIAN		BIT(7)
#define SPIS_RX_ENDIAN		BIT(6)
#define SPIS_TXMSBF		BIT(5)
#define SPIS_RXMSBF		BIT(4)
#define SPIS_CPHA		BIT(3)
#define SPIS_CPOL		BIT(2)
#define SPIS_TX_EN		BIT(1)
#define SPIS_RX_EN		BIT(0)

/* SPIS_DMA_CFG_REG */
#define TX_DMA_TRIG_EN		BIT(31)
#define TX_DMA_EN		BIT(30)
#define RX_DMA_EN		BIT(29)
#define TX_DMA_LEN		0xfffff

/* SPIS_SOFT_RST_REG */
#define SPIS_DMA_ADDR_EN	BIT(1)
#define SPIS_SOFT_RST		BIT(0)

#define MTK_SPI_SLAVE_MAX_FIFO_SIZE 512U

struct mtk_spi_slave {
	struct device *dev;
	void __iomem *base;
	struct clk *spi_clk;
	struct completion xfer_done;
	struct spi_transfer *cur_transfer;
	bool slave_aborted;
};

static const struct of_device_id mtk_spi_slave_of_match[] = {
	{ .compatible = "mediatek,mt2712-spi-slave", },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);

static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	reg_val &= ~RX_DMA_EN;
	reg_val &= ~TX_DMA_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
}

static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	reg_val &= ~SPIS_TX_EN;
	reg_val &= ~SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);
}

static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
{
	if (wait_for_completion_interruptible(&mdata->xfer_done) ||
	    mdata->slave_aborted) {
		dev_err(mdata->dev, "interrupted\n");
		return -EINTR;
	}

	return 0;
}

static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
					 struct spi_message *msg)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	bool cpha, cpol;
	u32 reg_val;

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;
	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (cpha)
		reg_val |= SPIS_CPHA;
	else
		reg_val &= ~SPIS_CPHA;
	if (cpol)
		reg_val |= SPIS_CPOL;
	else
		reg_val &= ~SPIS_CPOL;

	if (spi->mode & SPI_LSB_FIRST)
		reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
	else
		reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;

	reg_val &= ~SPIS_TX_ENDIAN;
	reg_val &= ~SPIS_RX_ENDIAN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	return 0;
}

static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
				       struct spi_device *spi,
				       struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int reg_val, cnt, remainder, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	/* Fill the TX FIFO one word at a time, then push any trailing bytes. */
	cnt = xfer->len / 4;
	if (xfer->tx_buf)
		iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
			      xfer->tx_buf, cnt);

	remainder = xfer->len % 4;
	if (xfer->tx_buf && remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
		writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
	}

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret) {
		mtk_spi_slave_disable_xfer(mdata);
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
	}

	return ret;
}

static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct device *dev = mdata->dev;
	int reg_val, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for
		 * the dma mapping
		 */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev, nonconst_tx,
					      xfer->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma)) {
			ret = -ENOMEM;
			goto disable_transfer;
		}
	}

	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
					      xfer->len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			ret = -ENOMEM;
			goto unmap_txdma;
		}
	}

	writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
	writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);

	writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);

	/* enable config reg tx rx_enable */
	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	/* config dma */
	reg_val = 0;
	reg_val |= (xfer->len - 1) & TX_DMA_LEN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= TX_DMA_EN;
	if (xfer->rx_buf)
		reg_val |= RX_DMA_EN;
	reg_val |= TX_DMA_TRIG_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret)
		goto unmap_rxdma;

	return 0;

unmap_rxdma:
	if (xfer->rx_buf)
		dma_unmap_single(dev, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);

unmap_txdma:
	if (xfer->tx_buf)
		dma_unmap_single(dev, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
disable_transfer:
	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);
	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	return ret;
}

static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	reinit_completion(&mdata->xfer_done);
	mdata->slave_aborted = false;
	mdata->cur_transfer = xfer;

	/* Transfers that do not fit in the FIFO are handled by DMA. */
	if (xfer->len > MTK_SPI_SLAVE_MAX_FIFO_SIZE)
		return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
	else
		return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
}

static int mtk_spi_slave_setup(struct spi_device *spi)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
	u32 reg_val;

	reg_val = DMA_DONE_EN | DATA_DONE_EN |
		  RSTA_DONE_EN | CMD_INVALID_EN;
	writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);

	reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
		  RSTA_DONE_MASK | CMD_INVALID_MASK;
	writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);

	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);

	return 0;
}

static int mtk_slave_abort(struct spi_controller *ctlr)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	mdata->slave_aborted = true;
	complete(&mdata->xfer_done);

	return 0;
}

static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_transfer *trans = mdata->cur_transfer;
	u32 int_status, reg_val, cnt, remainder;

	int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
	writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);

	if (!trans)
		return IRQ_NONE;

	/* DMA transfer finished: unmap the buffers and quiesce the engine. */
	if ((int_status & DMA_DONE_ST) &&
	    ((int_status & DATA_DONE_ST) ||
	     (int_status & RSTA_DONE_ST))) {
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

		if (trans->tx_buf)
			dma_unmap_single(mdata->dev, trans->tx_dma,
					 trans->len, DMA_TO_DEVICE);
		if (trans->rx_buf)
			dma_unmap_single(mdata->dev, trans->rx_dma,
					 trans->len, DMA_FROM_DEVICE);

		mtk_spi_slave_disable_dma(mdata);
		mtk_spi_slave_disable_xfer(mdata);
	}

	/* FIFO transfer finished: drain any received data from the RX FIFO. */
	if ((!(int_status & DMA_DONE_ST)) &&
	    ((int_status & DATA_DONE_ST) ||
	     (int_status & RSTA_DONE_ST))) {
		cnt = trans->len / 4;
		if (trans->rx_buf)
			ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
				     trans->rx_buf, cnt);
		remainder = trans->len % 4;
		if (trans->rx_buf && remainder > 0) {
			reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
			memcpy(trans->rx_buf + (cnt * 4),
			       &reg_val, remainder);
		}

		mtk_spi_slave_disable_xfer(mdata);
	}

	if (int_status & CMD_INVALID_ST) {
		dev_warn(&ctlr->dev, "cmd invalid\n");
		return IRQ_NONE;
	}

	mdata->cur_transfer = NULL;
	complete(&mdata->xfer_done);

	return IRQ_HANDLED;
}

static int mtk_spi_slave_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_spi_slave *mdata;
	struct resource *res;
	int irq, ret;

	ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to alloc spi slave\n");
		return -ENOMEM;
	}

	ctlr->auto_runtime_pm = true;
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	ctlr->mode_bits |= SPI_LSB_FIRST;

	ctlr->prepare_message = mtk_spi_slave_prepare_message;
	ctlr->transfer_one = mtk_spi_slave_transfer_one;
	ctlr->setup = mtk_spi_slave_setup;
	ctlr->slave_abort = mtk_slave_abort;

	mdata = spi_controller_get_devdata(ctlr);

	platform_set_drvdata(pdev, ctlr);

	init_completion(&mdata->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to determine base address\n");
		goto err_put_ctlr;
	}

	mdata->dev = &pdev->dev;

	mdata->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_ctlr;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
		ret = irq;
		goto err_put_ctlr;
	}

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_ctlr;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_ctlr;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_ctlr;
	}

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register slave controller(%d)\n", ret);
		clk_disable_unprepare(mdata->spi_clk);
		goto err_disable_runtime_pm;
	}

	clk_disable_unprepare(mdata->spi_clk);

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
err_put_ctlr:
	spi_controller_put(ctlr);

	return ret;
}

static int mtk_spi_slave_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_slave_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = spi_controller_suspend(ctlr);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_slave_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_controller_resume(ctlr);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_slave_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_slave_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
			   mtk_spi_slave_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_slave_driver = {
	.driver = {
		.name = "mtk-spi-slave",
		.pm = &mtk_spi_slave_pm,
		.of_match_table = mtk_spi_slave_of_match,
	},
	.probe = mtk_spi_slave_probe,
	.remove = mtk_spi_slave_remove,
};

module_platform_driver(mtk_spi_slave_driver);

MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi-slave");