1 /* 2 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd 3 * Author: Addy Ke <addy.ke@rock-chips.com> 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 */ 15 16 #include <linux/clk.h> 17 #include <linux/dmaengine.h> 18 #include <linux/module.h> 19 #include <linux/of.h> 20 #include <linux/platform_device.h> 21 #include <linux/spi/spi.h> 22 #include <linux/pm_runtime.h> 23 #include <linux/scatterlist.h> 24 25 #define DRIVER_NAME "rockchip-spi" 26 27 /* SPI register offsets */ 28 #define ROCKCHIP_SPI_CTRLR0 0x0000 29 #define ROCKCHIP_SPI_CTRLR1 0x0004 30 #define ROCKCHIP_SPI_SSIENR 0x0008 31 #define ROCKCHIP_SPI_SER 0x000c 32 #define ROCKCHIP_SPI_BAUDR 0x0010 33 #define ROCKCHIP_SPI_TXFTLR 0x0014 34 #define ROCKCHIP_SPI_RXFTLR 0x0018 35 #define ROCKCHIP_SPI_TXFLR 0x001c 36 #define ROCKCHIP_SPI_RXFLR 0x0020 37 #define ROCKCHIP_SPI_SR 0x0024 38 #define ROCKCHIP_SPI_IPR 0x0028 39 #define ROCKCHIP_SPI_IMR 0x002c 40 #define ROCKCHIP_SPI_ISR 0x0030 41 #define ROCKCHIP_SPI_RISR 0x0034 42 #define ROCKCHIP_SPI_ICR 0x0038 43 #define ROCKCHIP_SPI_DMACR 0x003c 44 #define ROCKCHIP_SPI_DMATDLR 0x0040 45 #define ROCKCHIP_SPI_DMARDLR 0x0044 46 #define ROCKCHIP_SPI_TXDR 0x0400 47 #define ROCKCHIP_SPI_RXDR 0x0800 48 49 /* Bit fields in CTRLR0 */ 50 #define CR0_DFS_OFFSET 0 51 52 #define CR0_CFS_OFFSET 2 53 54 #define CR0_SCPH_OFFSET 6 55 56 #define CR0_SCPOL_OFFSET 7 57 58 #define CR0_CSM_OFFSET 8 59 #define CR0_CSM_KEEP 0x0 60 /* ss_n be high for half sclk_out cycles */ 61 #define CR0_CSM_HALF 0X1 62 /* ss_n be high for one sclk_out cycle */ 63 #define CR0_CSM_ONE 
0x2 64 65 /* ss_n to sclk_out delay */ 66 #define CR0_SSD_OFFSET 10 67 /* 68 * The period between ss_n active and 69 * sclk_out active is half sclk_out cycles 70 */ 71 #define CR0_SSD_HALF 0x0 72 /* 73 * The period between ss_n active and 74 * sclk_out active is one sclk_out cycle 75 */ 76 #define CR0_SSD_ONE 0x1 77 78 #define CR0_EM_OFFSET 11 79 #define CR0_EM_LITTLE 0x0 80 #define CR0_EM_BIG 0x1 81 82 #define CR0_FBM_OFFSET 12 83 #define CR0_FBM_MSB 0x0 84 #define CR0_FBM_LSB 0x1 85 86 #define CR0_BHT_OFFSET 13 87 #define CR0_BHT_16BIT 0x0 88 #define CR0_BHT_8BIT 0x1 89 90 #define CR0_RSD_OFFSET 14 91 92 #define CR0_FRF_OFFSET 16 93 #define CR0_FRF_SPI 0x0 94 #define CR0_FRF_SSP 0x1 95 #define CR0_FRF_MICROWIRE 0x2 96 97 #define CR0_XFM_OFFSET 18 98 #define CR0_XFM_MASK (0x03 << SPI_XFM_OFFSET) 99 #define CR0_XFM_TR 0x0 100 #define CR0_XFM_TO 0x1 101 #define CR0_XFM_RO 0x2 102 103 #define CR0_OPM_OFFSET 20 104 #define CR0_OPM_MASTER 0x0 105 #define CR0_OPM_SLAVE 0x1 106 107 #define CR0_MTM_OFFSET 0x21 108 109 /* Bit fields in SER, 2bit */ 110 #define SER_MASK 0x3 111 112 /* Bit fields in SR, 5bit */ 113 #define SR_MASK 0x1f 114 #define SR_BUSY (1 << 0) 115 #define SR_TF_FULL (1 << 1) 116 #define SR_TF_EMPTY (1 << 2) 117 #define SR_RF_EMPTY (1 << 3) 118 #define SR_RF_FULL (1 << 4) 119 120 /* Bit fields in ISR, IMR, ISR, RISR, 5bit */ 121 #define INT_MASK 0x1f 122 #define INT_TF_EMPTY (1 << 0) 123 #define INT_TF_OVERFLOW (1 << 1) 124 #define INT_RF_UNDERFLOW (1 << 2) 125 #define INT_RF_OVERFLOW (1 << 3) 126 #define INT_RF_FULL (1 << 4) 127 128 /* Bit fields in ICR, 4bit */ 129 #define ICR_MASK 0x0f 130 #define ICR_ALL (1 << 0) 131 #define ICR_RF_UNDERFLOW (1 << 1) 132 #define ICR_RF_OVERFLOW (1 << 2) 133 #define ICR_TF_OVERFLOW (1 << 3) 134 135 /* Bit fields in DMACR */ 136 #define RF_DMA_EN (1 << 0) 137 #define TF_DMA_EN (1 << 1) 138 139 #define RXBUSY (1 << 0) 140 #define TXBUSY (1 << 1) 141 142 /* sclk_out: spi master internal logic in rk3x can support 50Mhz */ 
#define MAX_SCLK_OUT		50000000

/* Frame format selected in CTRLR0 (CR0_FRF_*) */
enum rockchip_ssi_type {
	SSI_MOTO_SPI = 0,
	SSI_TI_SSP,
	SSI_NS_MICROWIRE,
};

/* Per-direction DMA bookkeeping: channel, direction and FIFO bus address */
struct rockchip_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	dma_addr_t addr;
};

struct rockchip_spi {
	struct device *dev;
	struct spi_master *master;

	struct clk *spiclk;
	struct clk *apb_pclk;

	void __iomem *regs;
	/* depth of the FIFO buffer */
	u32 fifo_len;
	/* max bus freq supported */
	u32 max_freq;
	/* supported slave numbers */
	enum rockchip_ssi_type type;

	/* per-transfer parameters, captured in transfer_one() */
	u16 mode;
	u8 tmode;
	u8 bpw;
	u8 n_bytes;
	u32 rsd_nsecs;
	unsigned len;
	u32 speed;

	/* PIO cursors; tx/rx advance towards tx_end/rx_end */
	const void *tx;
	const void *tx_end;
	void *rx;
	void *rx_end;

	/* RXBUSY/TXBUSY flags for in-flight DMA */
	u32 state;
	/* protect state */
	spinlock_t lock;

	u32 use_dma;
	struct sg_table tx_sg;
	struct sg_table rx_sg;
	struct rockchip_spi_dma_data dma_rx;
	struct rockchip_spi_dma_data dma_tx;
	struct dma_slave_caps dma_caps;
};

/* Gate the whole controller on/off via SSIENR */
static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
{
	writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
}

/* Program the baud-rate divider (sclk_out = spiclk / div) */
static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
{
	writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
}

/* Drain any stale entries from the RX FIFO */
static inline void flush_fifo(struct rockchip_spi *rs)
{
	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
}

/*
 * Busy-poll SR until the controller clears SR_BUSY, i.e. the TX FIFO has
 * fully drained onto the wire.  Gives up (with a warning) after 5 ms.
 */
static inline void wait_for_idle(struct rockchip_spi *rs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5);

	do {
		if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
			return;
	} while (!time_after(jiffies, timeout));

	dev_warn(rs->dev, "spi controller is in busy state!\n");
}

/*
 * Probe the FIFO depth by writing increasing thresholds to TXFTLR until a
 * readback no longer matches; the first rejected value is the depth.
 * Returns 0 if the depth could not be determined.
 * NOTE(review): a FIFO of exactly 32 entries would fall out of the loop with
 * fifo == 32 and be reported as 32, while 31 maps to "unknown" — presumably
 * no supported SoC has a 31-entry FIFO; confirm against the TRM.
 */
static u32 get_fifo_len(struct rockchip_spi *rs)
{
	u32 fifo;

	for (fifo = 2; fifo < 32; fifo++) {
		writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
		if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
			break;
	}

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);

	return (fifo == 31) ? 0 : fifo;
}

/* Number of frames we may push into the TX FIFO right now */
static inline u32 tx_max(struct rockchip_spi *rs)
{
	u32 tx_left, tx_room;

	tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
	tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);

	return min(tx_left, tx_room);
}

/* Number of frames we may pull from the RX FIFO right now */
static inline u32 rx_max(struct rockchip_spi *rs)
{
	u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
	u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);

	return min(rx_left, rx_room);
}

/*
 * ->set_cs callback: assert/de-assert the hardware slave-select line by
 * setting/clearing the chip's bit in SER.  Note the polarity inversion
 * relative to the core (see quoted spi_set_cs() below).
 */
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 ser;
	struct spi_master *master = spi->master;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	/* the controller clocks must be running to touch SER */
	pm_runtime_get_sync(rs->dev);

	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;

	/*
	 * drivers/spi/spi.c:
	 * static void spi_set_cs(struct spi_device *spi, bool enable)
	 * {
	 *		if (spi->mode & SPI_CS_HIGH)
	 *			enable = !enable;
	 *
	 *		if (spi->cs_gpio >= 0)
	 *			gpio_set_value(spi->cs_gpio, !enable);
	 *		else if (spi->master->set_cs)
	 *		spi->master->set_cs(spi, !enable);
	 * }
	 *
	 * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
	 */
	if (!enable)
		ser |= 1 << spi->chip_select;
	else
		ser &= ~(1 << spi->chip_select);

	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);

	pm_runtime_put_sync(rs->dev);
}

/* ->prepare_message: latch the device's SPI mode for the coming transfers */
static int rockchip_spi_prepare_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;

	rs->mode = spi->mode;

	return 0;
}

/*
 * ->handle_err: called by the SPI core when a transfer failed (e.g. DMA
 * timeout).  Tear down any in-flight DMA and drain the RX FIFO so the next
 * transfer starts clean.
 */
static void rockchip_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	unsigned long flags;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spin_lock_irqsave(&rs->lock, flags);

	/*
	 * For DMA mode, we need terminate DMA channel and flush
	 * fifo for the next transfer if DMA thansfer timeout.
	 * handle_err() was called by core if transfer failed.
	 * Maybe it is reasonable for error handling here.
	 */
	if (rs->use_dma) {
		if (rs->state & RXBUSY) {
			dmaengine_terminate_async(rs->dma_rx.ch);
			flush_fifo(rs);
		}

		if (rs->state & TXBUSY)
			dmaengine_terminate_async(rs->dma_tx.ch);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

/* ->unprepare_message: disable the controller between messages */
static int rockchip_spi_unprepare_message(struct spi_master *master,
					  struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spi_enable_chip(rs, 0);

	return 0;
}

/* PIO: push as many frames as fit into the TX FIFO, advancing rs->tx */
static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
	u32 max = tx_max(rs);
	u32 txw = 0;

	while (max--) {
		if (rs->n_bytes == 1)
			txw = *(u8 *)(rs->tx);
		else
			txw = *(u16 *)(rs->tx);

		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
		rs->tx += rs->n_bytes;
	}
}

/* PIO: pull all available frames from the RX FIFO, advancing rs->rx */
static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
	u32 max = rx_max(rs);
	u32 rxw;

	while (max--) {
		rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
		if (rs->n_bytes == 1)
			*(u8 *)(rs->rx) = (u8)rxw;
		else
			*(u16 *)(rs->rx) = (u16)rxw;
		rs->rx += rs->n_bytes;
	}
}

/*
 * Polled (non-DMA) transfer loop: alternately feed the TX FIFO and drain
 * the RX FIFO until both cursors hit their end pointers.  Returns 0; the
 * transfer is complete (not in progress) when this returns.
 */
static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
{
	int remain = 0;

	do {
		if (rs->tx) {
			remain = rs->tx_end - rs->tx;
			rockchip_spi_pio_writer(rs);
		}

		if (rs->rx) {
			remain = rs->rx_end - rs->rx;
			rockchip_spi_pio_reader(rs);
		}

		cpu_relax();
	} while (remain);

	/* If tx, wait until the TX FIFO has completely drained to the wire. */
	if (rs->tx)
		wait_for_idle(rs);

	spi_enable_chip(rs, 0);

	return 0;
}

/*
 * RX DMA completion callback: clear RXBUSY and, if TX is also done (or was
 * never started), disable the chip and tell the core the transfer finished.
 */
static void rockchip_spi_dma_rxcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~RXBUSY;
	if (!(rs->state & TXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

/*
 * TX DMA completion callback: DMA completion only means the FIFO was fed,
 * so first wait for the controller to shift everything out, then clear
 * TXBUSY and finalize if RX is also done.
 */
static void rockchip_spi_dma_txcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	/* Wait until the TX FIFO data has completely drained. */
	wait_for_idle(rs);

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~TXBUSY;
	if (!(rs->state & RXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

/*
 * Configure, submit and kick the DMA descriptors for the current transfer.
 * RX is always submitted before TX (see comment below).  Returns 0 on
 * success or -EINVAL if a descriptor could not be prepared; on TX prep
 * failure a previously prepared RX descriptor is terminated.
 */
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
	unsigned long flags;
	struct dma_slave_config rxconf, txconf;
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	spin_lock_irqsave(&rs->lock, flags);
	rs->state &= ~RXBUSY;
	rs->state &= ~TXBUSY;
	spin_unlock_irqrestore(&rs->lock, flags);

	rxdesc = NULL;
	if (rs->rx) {
		rxconf.direction = rs->dma_rx.direction;
		rxconf.src_addr = rs->dma_rx.addr;
		rxconf.src_addr_width = rs->n_bytes;
		if (rs->dma_caps.max_burst > 4)
			rxconf.src_maxburst = 4;
		else
			rxconf.src_maxburst = 1;
		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				rs->dma_rx.ch,
				rs->rx_sg.sgl, rs->rx_sg.nents,
				rs->dma_rx.direction, DMA_PREP_INTERRUPT);
		if (!rxdesc)
			return -EINVAL;

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = rs;
	}

	txdesc = NULL;
	if (rs->tx) {
		txconf.direction = rs->dma_tx.direction;
		txconf.dst_addr = rs->dma_tx.addr;
		txconf.dst_addr_width = rs->n_bytes;
		if (rs->dma_caps.max_burst > 4)
			txconf.dst_maxburst = 4;
		else
			txconf.dst_maxburst = 1;
		dmaengine_slave_config(rs->dma_tx.ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				rs->dma_tx.ch,
				rs->tx_sg.sgl, rs->tx_sg.nents,
				rs->dma_tx.direction, DMA_PREP_INTERRUPT);
		if (!txdesc) {
			/* don't leave a dangling RX descriptor behind */
			if (rxdesc)
				dmaengine_terminate_sync(rs->dma_rx.ch);
			return -EINVAL;
		}

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = rs;
	}

	/* rx must be started before tx due to spi instinct */
	if (rxdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= RXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rs->dma_rx.ch);
	}

	if (txdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= TXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(rs->dma_tx.ch);
	}

	return 0;
}

/*
 * Program CTRLR0/CTRLR1, FIFO thresholds, DMA enables and the baud divider
 * from the per-transfer state captured in transfer_one().  Must be called
 * while the controller is disabled.
 */
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;
	int rsd = 0;

	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
		| (CR0_EM_BIG << CR0_EM_OFFSET);

	/* n_bytes (1 or 2) happens to equal the DFS encoding for 8/16 bit */
	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	/* CPHA/CPOL are the low two mode bits, matching SCPH/SCPOL */
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
		rs->speed = MAX_SCLK_OUT;

	/* the minimum divisor is 2 */
	if (rs->max_freq < 2 * rs->speed) {
		clk_set_rate(rs->spiclk, 2 * rs->speed);
		rs->max_freq = clk_get_rate(rs->spiclk);
	}

	/* div doesn't support odd number */
	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
	div = (div + 1) & 0xfffe;

	/*
	 * Rx sample delay is expressed in parent clock cycles (max 3).
	 * The >> 8 on both sides keeps the multiplication within 32 bits.
	 */
	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
				1000000000 >> 8);
	if (!rsd && rs->rsd_nsecs) {
		pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
			     rs->max_freq, rs->rsd_nsecs);
	} else if (rsd > 3) {
		rsd = 3;
		pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
			     rs->max_freq, rs->rsd_nsecs,
			     rsd * 1000000000U / rs->max_freq);
	}
	cr0 |= rsd << CR0_RSD_OFFSET;

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);

	/* CTRLR1 holds the frame count minus one */
	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}

/*
 * ->transfer_one: capture the transfer parameters, configure the hardware
 * and run the transfer via DMA (returns 1 = in progress, finalized from the
 * DMA callbacks) or PIO (returns 0 = done).  Negative return on error.
 */
static int rockchip_spi_transfer_one(
		struct spi_master *master,
		struct spi_device *spi,
		struct spi_transfer *xfer)
{
	int ret = 0;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	/* the previous transfer should have quiesced the controller */
	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));

	if (!xfer->tx_buf && !xfer->rx_buf) {
		dev_err(rs->dev, "No buffer for transfer\n");
		return -EINVAL;
	}

	rs->speed = xfer->speed_hz;
	rs->bpw = xfer->bits_per_word;
	rs->n_bytes = rs->bpw >> 3;

	rs->tx = xfer->tx_buf;
	rs->tx_end = rs->tx + xfer->len;
	rs->rx = xfer->rx_buf;
	rs->rx_end = rs->rx + xfer->len;
	rs->len = xfer->len;

	rs->tx_sg = xfer->tx_sg;
	rs->rx_sg = xfer->rx_sg;

	if (rs->tx && rs->rx)
		rs->tmode = CR0_XFM_TR;
	else if (rs->tx)
		rs->tmode = CR0_XFM_TO;
	else if (rs->rx)
		rs->tmode = CR0_XFM_RO;

	/* we need prepare dma before spi was enabled */
	if (master->can_dma && master->can_dma(master, spi, xfer))
		rs->use_dma = 1;
	else
		rs->use_dma = 0;

	rockchip_spi_config(rs);

	if (rs->use_dma) {
		if (rs->tmode == CR0_XFM_RO) {
			/* rx: dma must be prepared first */
			ret = rockchip_spi_prepare_dma(rs);
			spi_enable_chip(rs, 1);
		} else {
			/* tx or tr: spi must be enabled first */
			spi_enable_chip(rs, 1);
			ret = rockchip_spi_prepare_dma(rs);
		}
		/* successful DMA prepare means the transfer is in progress */
		ret = ret ? ret : 1;
	} else {
		spi_enable_chip(rs, 1);
		ret = rockchip_spi_pio_transfer(rs);
	}

	return ret;
}

/* Use DMA only when the transfer won't fit in the FIFO in one go */
static bool rockchip_spi_can_dma(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	return (xfer->len > rs->fifo_len);
}

/*
 * Probe: map registers, enable clocks, detect the FIFO depth, optionally
 * grab TX/RX DMA channels (falling back to PIO if unavailable, deferring
 * only on -EPROBE_DEFER) and register the master.  Error paths unwind in
 * strict reverse order of acquisition via the labels at the bottom.
 */
static int rockchip_spi_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct rockchip_spi *rs;
	struct spi_master *master;
	struct resource *mem;
	u32 rsd_nsecs;

	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	rs = spi_master_get_devdata(master);

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(rs->regs)) {
		ret =  PTR_ERR(rs->regs);
		goto err_ioremap_resource;
	}

	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(rs->apb_pclk)) {
		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
		ret = PTR_ERR(rs->apb_pclk);
		goto err_ioremap_resource;
	}

	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
	if (IS_ERR(rs->spiclk)) {
		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
		ret = PTR_ERR(rs->spiclk);
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->spiclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
		goto err_spiclk_enable;
	}

	spi_enable_chip(rs, 0);

	rs->type = SSI_MOTO_SPI;
	rs->master = master;
	rs->dev = &pdev->dev;
	rs->max_freq = clk_get_rate(rs->spiclk);

	/* optional DT-provided RX sample delay; stays 0 if absent */
	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
				  &rsd_nsecs))
		rs->rsd_nsecs = rsd_nsecs;

	rs->fifo_len = get_fifo_len(rs);
	if (!rs->fifo_len) {
		dev_err(&pdev->dev, "Failed to get fifo length\n");
		ret = -EINVAL;
		goto err_get_fifo_len;
	}

	spin_lock_init(&rs->lock);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->num_chipselect = 2;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	master->set_cs = rockchip_spi_set_cs;
	master->prepare_message = rockchip_spi_prepare_message;
	master->unprepare_message = rockchip_spi_unprepare_message;
	master->transfer_one = rockchip_spi_transfer_one;
	master->handle_err = rockchip_spi_handle_err;

	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
	if (IS_ERR(rs->dma_tx.ch)) {
		/* Check tx to see if we need defer probing driver */
		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_get_fifo_len;
		}
		/* any other error: fall back to PIO */
		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
		rs->dma_tx.ch = NULL;
	}

	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
	if (IS_ERR(rs->dma_rx.ch)) {
		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_free_dma_tx;
		}
		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
		rs->dma_rx.ch = NULL;
	}

	/* DMA mode is offered only when both channels are available */
	if (rs->dma_tx.ch && rs->dma_rx.ch) {
		dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
		rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
		rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
		rs->dma_tx.direction = DMA_MEM_TO_DEV;
		rs->dma_rx.direction = DMA_DEV_TO_MEM;

		master->can_dma = rockchip_spi_can_dma;
		master->dma_tx = rs->dma_tx.ch;
		master->dma_rx = rs->dma_rx.ch;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register master\n");
		goto err_register_master;
	}

	return 0;

err_register_master:
	pm_runtime_disable(&pdev->dev);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);
err_free_dma_tx:
	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
err_get_fifo_len:
	clk_disable_unprepare(rs->spiclk);
err_spiclk_enable:
	clk_disable_unprepare(rs->apb_pclk);
err_ioremap_resource:
	spi_master_put(master);

	return ret;
}

/* Remove: reverse of probe (register mapping and clk handles are devm-managed) */
static int rockchip_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);

	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the master, then gate clocks unless runtime PM already did */
static int rockchip_spi_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = spi_master_suspend(rs->master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}

/* System resume: re-enable clocks (if we gated them) and resume the master */
static int rockchip_spi_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(rs->apb_pclk);
		if (ret < 0)
			return ret;

		ret = clk_prepare_enable(rs->spiclk);
		if (ret < 0) {
			clk_disable_unprepare(rs->apb_pclk);
			return ret;
		}
	}

	ret = spi_master_resume(rs->master);
	if (ret < 0) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
/* Runtime suspend: gate both clocks while the bus is idle */
static int rockchip_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	return 0;
}

/* Runtime resume: apb_pclk first, then spiclk; unwind on failure */
static int rockchip_spi_runtime_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(rs->spiclk);
	if (ret)
		clk_disable_unprepare(rs->apb_pclk);

	return ret;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops rockchip_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
			   rockchip_spi_runtime_resume, NULL)
};

static const struct of_device_id rockchip_spi_dt_match[] = {
	{ .compatible = "rockchip,rk3066-spi", },
	{ .compatible = "rockchip,rk3188-spi", },
	{ .compatible = "rockchip,rk3288-spi", },
	{ .compatible = "rockchip,rk3399-spi", },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);

static struct platform_driver rockchip_spi_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.pm = &rockchip_spi_pm,
		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
	},
	.probe = rockchip_spi_probe,
	.remove = rockchip_spi_remove,
};

module_platform_driver(rockchip_spi_driver);

MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
MODULE_LICENSE("GPL v2");