/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPI_CFG0_REG                      0x0000
#define SPI_CFG1_REG                      0x0004
#define SPI_TX_SRC_REG                    0x0008
#define SPI_RX_DST_REG                    0x000c
#define SPI_TX_DATA_REG                   0x0010
#define SPI_RX_DATA_REG                   0x0014
#define SPI_CMD_REG                       0x0018
#define SPI_STATUS0_REG                   0x001c
#define SPI_PAD_SEL_REG                   0x0024
#define SPI_CFG2_REG                      0x0028

#define SPI_CFG0_SCK_HIGH_OFFSET          0
#define SPI_CFG0_SCK_LOW_OFFSET           8
#define SPI_CFG0_CS_HOLD_OFFSET           16
#define SPI_CFG0_CS_SETUP_OFFSET          24
#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET    16
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET    0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET   16

#define SPI_CFG1_CS_IDLE_OFFSET           0
#define SPI_CFG1_PACKET_LOOP_OFFSET       8
#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
#define SPI_CFG1_GET_TICK_DLY_OFFSET      30

#define SPI_CFG1_CS_IDLE_MASK             0xff
#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000

#define SPI_CMD_ACT                       BIT(0)
#define SPI_CMD_RESUME                    BIT(1)
#define SPI_CMD_RST                       BIT(2)
#define SPI_CMD_PAUSE_EN                  BIT(4)
#define SPI_CMD_DEASSERT                  BIT(5)
#define SPI_CMD_SAMPLE_SEL                BIT(6)
#define SPI_CMD_CS_POL                    BIT(7)
#define SPI_CMD_CPHA                      BIT(8)
#define SPI_CMD_CPOL                      BIT(9)
#define SPI_CMD_RX_DMA                    BIT(10)
#define SPI_CMD_TX_DMA                    BIT(11)
#define SPI_CMD_TXMSBF                    BIT(12)
#define SPI_CMD_RXMSBF                    BIT(13)
#define SPI_CMD_RX_ENDIAN                 BIT(14)
#define SPI_CMD_TX_ENDIAN                 BIT(15)
#define SPI_CMD_FINISH_IE                 BIT(16)
#define SPI_CMD_PAUSE_IE                  BIT(17)

#define MT8173_SPI_MAX_PAD_SEL            3

#define MTK_SPI_PAUSE_INT_STATUS          0x2

#define MTK_SPI_IDLE                      0
#define MTK_SPI_PAUSED                    1

#define MTK_SPI_MAX_FIFO_SIZE             32U
#define MTK_SPI_PACKET_SIZE               1024

struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
	/* some ICs adjust the cfg register to enhance timing accuracy */
	bool enhance_timing;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};
static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,
	.tx_mlsb = 1,
	.cs_pol = 0,
	.sample_sel = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the tx/rx MSB-first bits */
	if (chip_config->tx_mlsb)
		reg_val |= SPI_CMD_TXMSBF;
	else
		reg_val &= ~SPI_CMD_TXMSBF;
	if (chip_config->rx_mlsb)
		reg_val |= SPI_CMD_RXMSBF;
	else
		reg_val &= ~SPI_CMD_RXMSBF;

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		if (chip_config->cs_pol)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;
		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* always enable the finish and pause interrupts */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

static void mtk_spi_prepare_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_clk_hz = clk_get_rate(mdata->spi_clk);
	if (xfer->speed_hz < spi_clk_hz / 2)
		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val |= (((sck_time - 1) & 0xffff)
			   << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
		reg_val |= (((cs_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
		reg_val |= (((cs_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	} else {
		reg_val |= (((sck_time - 1) & 0xff)
			   << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl)
		writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
	if (mdata->rx_sgl)
		writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
}

static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer);
	mtk_spi_setup_packet(master);

	cnt = xfer->len / 4;
	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);

	remainder = xfer->len % 4;
	if (remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
		writel(reg_val, mdata->base + SPI_TX_DATA_REG);
	}

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));

	return 0;
}

static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf +
					mdata->num_xfered +
					(cnt * 4),
					&reg_val,
					remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}

		len = trans->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(master);

		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
			      trans->tx_buf + mdata->num_xfered, cnt);

		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val,
			       trans->tx_buf + (cnt * 4) + mdata->num_xfered,
			       remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}

		mtk_spi_enable_transfer(master);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mtk_spi *mdata;
	const struct of_device_id *of_id;
	struct resource *res;
	int i, irq, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
	if (!master) {
		dev_err(&pdev->dev, "failed to alloc spi master\n");
		return -ENOMEM;
	}

	master->auto_runtime_pm = true;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;

	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to probe of_node\n");
		ret = -EINVAL;
		goto err_put_master;
	}

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = of_id->data;
	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(
			pdev->dev.of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0) {
			dev_err(&pdev->dev,
				"No 'mediatek,pad-select' property\n");
			ret = -EINVAL;
			goto err_put_master;
		}

		mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel) {
			ret = -ENOMEM;
			goto err_put_master;
		}

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(pdev->dev.of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
				dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
					i, mdata->pad_sel[i]);
				ret = -EINVAL;
				goto err_put_master;
			}
		}
	}

	platform_set_drvdata(pdev, master);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to determine base address\n");
		goto err_put_master;
	}

	mdata->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
		ret = irq;
		goto err_put_master;
	}

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_master;
	}

	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk)) {
		ret = PTR_ERR(mdata->parent_clk);
		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk)) {
		ret = PTR_ERR(mdata->sel_clk);
		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_master;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_master;
	}

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
		clk_disable_unprepare(mdata->spi_clk);
		goto err_put_master;
	}

	clk_disable_unprepare(mdata->spi_clk);

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
		goto err_disable_runtime_pm;
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect) {
			dev_err(&pdev->dev,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}

		if (!master->cs_gpios && master->num_chipselect > 1) {
			dev_err(&pdev->dev,
				"cs_gpios not specified and num_chipselect > 1\n");
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}

		if (master->cs_gpios) {
			for (i = 0; i < master->num_chipselect; i++) {
				ret = devm_gpio_request(&pdev->dev,
							master->cs_gpios[i],
							dev_name(&pdev->dev));
				if (ret) {
					dev_err(&pdev->dev,
						"can't get CS GPIO %i\n", i);
					goto err_disable_runtime_pm;
				}
			}
		}
	}

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
err_put_master:
	spi_master_put(master);

	return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
spi_clk (%d)\n", ret); 816 return ret; 817 } 818 } 819 820 ret = spi_master_resume(master); 821 if (ret < 0) 822 clk_disable_unprepare(mdata->spi_clk); 823 824 return ret; 825 } 826 #endif /* CONFIG_PM_SLEEP */ 827 828 #ifdef CONFIG_PM 829 static int mtk_spi_runtime_suspend(struct device *dev) 830 { 831 struct spi_master *master = dev_get_drvdata(dev); 832 struct mtk_spi *mdata = spi_master_get_devdata(master); 833 834 clk_disable_unprepare(mdata->spi_clk); 835 836 return 0; 837 } 838 839 static int mtk_spi_runtime_resume(struct device *dev) 840 { 841 struct spi_master *master = dev_get_drvdata(dev); 842 struct mtk_spi *mdata = spi_master_get_devdata(master); 843 int ret; 844 845 ret = clk_prepare_enable(mdata->spi_clk); 846 if (ret < 0) { 847 dev_err(dev, "failed to enable spi_clk (%d)\n", ret); 848 return ret; 849 } 850 851 return 0; 852 } 853 #endif /* CONFIG_PM */ 854 855 static const struct dev_pm_ops mtk_spi_pm = { 856 SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume) 857 SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend, 858 mtk_spi_runtime_resume, NULL) 859 }; 860 861 static struct platform_driver mtk_spi_driver = { 862 .driver = { 863 .name = "mtk-spi", 864 .pm = &mtk_spi_pm, 865 .of_match_table = mtk_spi_of_match, 866 }, 867 .probe = mtk_spi_probe, 868 .remove = mtk_spi_remove, 869 }; 870 871 module_platform_driver(mtk_spi_driver); 872 873 MODULE_DESCRIPTION("MTK SPI Controller driver"); 874 MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>"); 875 MODULE_LICENSE("GPL v2"); 876 MODULE_ALIAS("platform:mtk-spi"); 877