/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPI_CFG0_REG                      0x0000
#define SPI_CFG1_REG                      0x0004
#define SPI_TX_SRC_REG                    0x0008
#define SPI_RX_DST_REG                    0x000c
#define SPI_TX_DATA_REG                   0x0010
#define SPI_RX_DATA_REG                   0x0014
#define SPI_CMD_REG                       0x0018
#define SPI_STATUS0_REG                   0x001c
#define SPI_PAD_SEL_REG                   0x0024
#define SPI_CFG2_REG                      0x0028

#define SPI_CFG0_SCK_HIGH_OFFSET          0
#define SPI_CFG0_SCK_LOW_OFFSET           8
#define SPI_CFG0_CS_HOLD_OFFSET           16
#define SPI_CFG0_CS_SETUP_OFFSET          24
#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET    16
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET    0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET   16

#define SPI_CFG1_CS_IDLE_OFFSET           0
#define SPI_CFG1_PACKET_LOOP_OFFSET       8
#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
#define SPI_CFG1_GET_TICK_DLY_OFFSET      30

#define SPI_CFG1_CS_IDLE_MASK             0xff
#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000

#define SPI_CMD_ACT                       BIT(0)
#define SPI_CMD_RESUME                    BIT(1)
#define SPI_CMD_RST                       BIT(2)
#define SPI_CMD_PAUSE_EN                  BIT(4)
#define SPI_CMD_DEASSERT                  BIT(5)
#define SPI_CMD_SAMPLE_SEL                BIT(6)
#define SPI_CMD_CS_POL                    BIT(7)
#define SPI_CMD_CPHA                      BIT(8)
#define SPI_CMD_CPOL                      BIT(9)
#define SPI_CMD_RX_DMA                    BIT(10)
#define SPI_CMD_TX_DMA                    BIT(11)
#define SPI_CMD_TXMSBF                    BIT(12)
#define SPI_CMD_RXMSBF                    BIT(13)
#define SPI_CMD_RX_ENDIAN                 BIT(14)
#define SPI_CMD_TX_ENDIAN                 BIT(15)
#define SPI_CMD_FINISH_IE                 BIT(16)
#define SPI_CMD_PAUSE_IE                  BIT(17)

#define MT8173_SPI_MAX_PAD_SEL            3

#define MTK_SPI_PAUSE_INT_STATUS          0x2

#define MTK_SPI_IDLE                      0
#define MTK_SPI_PAUSED                    1

#define MTK_SPI_MAX_FIFO_SIZE             32U
#define MTK_SPI_PACKET_SIZE               1024

struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
	/* some ICs provide an adjusted cfg register to enhance timing accuracy */
	bool enhance_timing;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

/*
 * Default chip configuration, used unless the platform
 * supplies its own.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,
	.tx_mlsb = 1,
	.cs_pol = 0,
	.sample_sel = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the tx/rx MSB-first bits */
	if (chip_config->tx_mlsb)
		reg_val |= SPI_CMD_TXMSBF;
	else
		reg_val &= ~SPI_CMD_TXMSBF;
	if (chip_config->rx_mlsb)
		reg_val |= SPI_CMD_RXMSBF;
	else
		reg_val &= ~SPI_CMD_RXMSBF;

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		if (chip_config->cs_pol)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;
		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}
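
/*
 * Derive the SCK and CS timing for this transfer from the source clock:
 * div is the (rounded-up) ratio of the spi clock rate to the requested
 * speed_hz, split evenly between the SCK high and low phases; the CS
 * hold/setup and idle times are programmed as twice the SCK half-period.
 */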
static void mtk_spi_prepare_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_clk_hz = clk_get_rate(mdata->spi_clk);
	if (xfer->speed_hz < spi_clk_hz / 2)
		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
		reg_val |= (((cs_time - 1) & 0xffff)
			    << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
		reg_val |= (((cs_time - 1) & 0xffff)
			    << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	} else {
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}
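
/*
 * The hardware moves data as packet_size * packet_loop bytes, with
 * packet_size capped at MTK_SPI_PACKET_SIZE (1024).  A length that is
 * not a multiple of 1024 therefore has to be split: the helpers below
 * carve the largest 1024-byte-multiple chunk out of the current
 * tx/rx scatterlist segment and leave the remainder (mult_delta) for a
 * follow-up transfer restarted from the interrupt handler.
 */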
static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl)
		writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
	if (mdata->rx_sgl)
		writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
}

static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mtk_spi_prepare_transfer(master, xfer);
	mtk_spi_setup_packet(master);

	cnt = xfer->len / 4;
	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);

	remainder = xfer->len % 4;
	if (remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
		writel(reg_val, mdata->base + SPI_TX_DATA_REG);
	}

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));

	return 0;
}
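
/*
 * Transfer-complete interrupt.  For FIFO transfers this drains the RX
 * FIFO, refills the TX FIFO with the next chunk (up to 32 bytes) and
 * restarts the engine until the whole transfer is done.  For DMA
 * transfers it walks the TX/RX scatterlists, reprogramming the DMA
 * addresses for each segment, and finalizes the transfer once both
 * lists are exhausted.
 */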
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf + (cnt * 4),
				       &reg_val, remainder);
			}
		}

		trans->len -= mdata->xfer_len;
		if (!trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}

		if (trans->tx_buf)
			trans->tx_buf += mdata->xfer_len;
		if (trans->rx_buf)
			trans->rx_buf += mdata->xfer_len;

		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, trans->len);
		mtk_spi_setup_packet(master);

		cnt = trans->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
			      trans->tx_buf, cnt);

		remainder = trans->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, trans->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}

		mtk_spi_enable_transfer(master);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}
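
/*
 * Probe: parse the optional "mediatek,pad-select" property, map the
 * registers, request the completion IRQ and look up the clocks.  The
 * spi clock is enabled only long enough to reparent sel_clk to
 * parent_clk; afterwards it is managed entirely through runtime PM.
 */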
static int mtk_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mtk_spi *mdata;
	const struct of_device_id *of_id;
	struct resource *res;
	int i, irq, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
	if (!master) {
		dev_err(&pdev->dev, "failed to alloc spi master\n");
		return -ENOMEM;
	}

	master->auto_runtime_pm = true;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;

	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to probe of_node\n");
		ret = -EINVAL;
		goto err_put_master;
	}

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = of_id->data;
	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(
			pdev->dev.of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0) {
			dev_err(&pdev->dev,
				"No 'mediatek,pad-select' property\n");
			ret = -EINVAL;
			goto err_put_master;
		}

		mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel) {
			ret = -ENOMEM;
			goto err_put_master;
		}

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(pdev->dev.of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
				dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
					i, mdata->pad_sel[i]);
				ret = -EINVAL;
				goto err_put_master;
			}
		}
	}

	platform_set_drvdata(pdev, master);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to determine base address\n");
		goto err_put_master;
	}

	mdata->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
		ret = irq;
		goto err_put_master;
	}

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_master;
	}

	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk)) {
		ret = PTR_ERR(mdata->parent_clk);
		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk)) {
		ret = PTR_ERR(mdata->sel_clk);
		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_master;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_master;
	}

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
		clk_disable_unprepare(mdata->spi_clk);
		goto err_put_master;
	}

	clk_disable_unprepare(mdata->spi_clk);

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
		goto err_disable_runtime_pm;
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect) {
			dev_err(&pdev->dev,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}

		if (!master->cs_gpios && master->num_chipselect > 1) {
			dev_err(&pdev->dev,
				"cs_gpios not specified and num_chipselect > 1\n");
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}

		if (master->cs_gpios) {
			for (i = 0; i < master->num_chipselect; i++) {
				ret = devm_gpio_request(&pdev->dev,
							master->cs_gpios[i],
							dev_name(&pdev->dev));
				if (ret) {
					dev_err(&pdev->dev,
						"can't get CS GPIO %i\n", i);
					goto err_disable_runtime_pm;
				}
			}
		}
	}

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
err_put_master:
	spi_master_put(master);

	return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");