// SPDX-License-Identifier: GPL-2.0+
//
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2018 NXP Semiconductors

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>

#define DRIVER_NAME			"fsl_lpspi"

#define FSL_LPSPI_RPM_TIMEOUT		50 /* 50ms */

/* The maximum bytes that edma can transfer at once. */
#define FSL_LPSPI_MAX_EDMA_BYTES	((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID	0x0
#define IMX7ULP_PARAM	0x4
#define IMX7ULP_CR	0x10
#define IMX7ULP_SR	0x14
#define IMX7ULP_IER	0x18
#define IMX7ULP_DER	0x1c
#define IMX7ULP_CFGR0	0x20
#define IMX7ULP_CFGR1	0x24
#define IMX7ULP_DMR0	0x30
#define IMX7ULP_DMR1	0x34
#define IMX7ULP_CCR	0x40
#define IMX7ULP_FCR	0x58
#define IMX7ULP_FSR	0x5c
#define IMX7ULP_TCR	0x60
#define IMX7ULP_TDR	0x64
#define IMX7ULP_RSR	0x70
#define IMX7ULP_RDR	0x74

/* General control register field defines */
#define CR_RRF		BIT(9)
#define CR_RTF		BIT(8)
#define CR_RST		BIT(1)
#define CR_MEN		BIT(0)
#define SR_MBF		BIT(24)
#define SR_TCF		BIT(10)
#define SR_FCF		BIT(9)
#define SR_RDF		BIT(1)
#define SR_TDF		BIT(0)
#define IER_TCIE	BIT(10)
#define IER_FCIE	BIT(9)
#define IER_RDIE	BIT(1)
#define IER_TDIE	BIT(0)
#define DER_RDDE	BIT(1)
#define DER_TDDE	BIT(0)
#define CFGR1_PCSCFG	BIT(27)
#define CFGR1_PINCFG	(BIT(24)|BIT(25))
#define CFGR1_PCSPOL	BIT(8)
#define CFGR1_NOSTALL	BIT(3)
#define CFGR1_MASTER	BIT(0)
#define FSR_TXCOUNT	(0xFF)
#define RSR_RXEMPTY	BIT(1)
#define TCR_CPOL	BIT(31)
#define TCR_CPHA	BIT(30)
#define TCR_CONT	BIT(21)
#define TCR_CONTC	BIT(20)
#define TCR_RXMSK	BIT(19)
#define TCR_TXMSK	BIT(18)

static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128};

struct lpspi_config {
	u8 bpw;
	u8 chip_select;
	u8 prescale;
	u16 mode;
	u32 speed_hz;
};

struct fsl_lpspi_data {
	struct device *dev;
	void __iomem *base;
	unsigned long base_phys;
	struct clk *clk_ipg;
	struct clk *clk_per;
	bool is_slave;
	bool is_first_byte;

	void *rx_buf;
	const void *tx_buf;
	void (*tx)(struct fsl_lpspi_data *);
	void (*rx)(struct fsl_lpspi_data *);

	u32 remain;
	u8 watermark;
	u8 txfifosize;
	u8 rxfifosize;

	struct lpspi_config config;
	struct completion xfer_done;

	bool slave_aborted;

	/* DMA */
	bool usedma;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	int chipselect[0];
};

static const struct of_device_id fsl_lpspi_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-spi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);

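/*
 * LPSPI_BUF_RX()/LPSPI_BUF_TX() expand into per-width helpers (u8/u16/u32)
 * that move one FIFO word between the RDR/TDR data registers and the
 * current transfer buffer, advancing the buffer pointer as they go.
 */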
#define LPSPI_BUF_RX(type)						\
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
									\
	if (fsl_lpspi->rx_buf) {					\
		*(type *)fsl_lpspi->rx_buf = val;			\
		fsl_lpspi->rx_buf += sizeof(type);			\
	}								\
}

#define LPSPI_BUF_TX(type)						\
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	type val = 0;							\
									\
	if (fsl_lpspi->tx_buf) {					\
		val = *(type *)fsl_lpspi->tx_buf;			\
		fsl_lpspi->tx_buf += sizeof(type);			\
	}								\
									\
	fsl_lpspi->remain -= sizeof(type);				\
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
}

LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)

static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
			      unsigned int enable)
{
	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}

static int fsl_lpspi_bytes_per_word(const int bpw)
{
	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}

static bool fsl_lpspi_can_dma(struct spi_controller *controller,
			      struct spi_device *spi,
			      struct spi_transfer *transfer)
{
	unsigned int bytes_per_word;

	if (!controller->dma_rx)
		return false;

	bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);

	switch (bytes_per_word) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		return false;
	}

	return true;
}

static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		return ret;
	}

	return 0;
}

static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_mark_last_busy(fsl_lpspi->dev);
	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;
}

static int fsl_lpspi_prepare_message(struct spi_controller *controller,
				     struct spi_message *msg)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	struct spi_device *spi = msg->spi;
	int gpio = fsl_lpspi->chipselect[spi->chip_select];

	if (gpio_is_valid(gpio))
		gpio_direction_output(gpio,
				      spi->mode & SPI_CS_HIGH ? 0 : 1);

	return 0;
}

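/*
 * Fill the TX FIFO from the current buffer until the FIFO is full or the
 * transfer is exhausted.  Once everything has been queued, the frame
 * completion interrupt (FCIE) is enabled and, in master mode, TCR_CONTC is
 * cleared so PCS deasserts at the end of the frame; otherwise the TX data
 * interrupt (TDIE) is re-armed to continue filling later.
 */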
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	u8 txfifo_cnt;
	u32 temp;

	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

	while (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->remain)
			break;
		fsl_lpspi->tx(fsl_lpspi);
		txfifo_cnt++;
	}

	if (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->is_slave) {
			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
			temp &= ~TCR_CONTC;
			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
		}

		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
	} else {
		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
	}
}

static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
		fsl_lpspi->rx(fsl_lpspi);
}

static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp = 0;

	temp |= fsl_lpspi->config.bpw - 1;
	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
	if (!fsl_lpspi->is_slave) {
		temp |= fsl_lpspi->config.prescale << 27;
		temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;

		/*
		 * Set TCR_CONT will keep SS asserted after current transfer.
		 * For the first transfer, clear TCR_CONTC to assert SS.
		 * For subsequent transfers, set TCR_CONTC to keep SS asserted.
		 */
		if (!fsl_lpspi->usedma) {
			temp |= TCR_CONT;
			if (fsl_lpspi->is_first_byte)
				temp &= ~TCR_CONTC;
			else
				temp |= TCR_CONTC;
		}
	}
	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}

static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma)
		temp = fsl_lpspi->watermark >> 1 |
		       (fsl_lpspi->watermark >> 1) << 16;
	else
		temp = fsl_lpspi->watermark >> 1;

	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);

	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
}

static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
	struct lpspi_config config = fsl_lpspi->config;
	unsigned int perclk_rate, scldiv;
	u8 prescale;

	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);

	if (config.speed_hz > perclk_rate / 2) {
		dev_err(fsl_lpspi->dev,
			"per-clk should be at least two times of transfer speed");
		return -EINVAL;
	}

	for (prescale = 0; prescale < 8; prescale++) {
		scldiv = perclk_rate /
			 (clkdivs[prescale] * config.speed_hz) - 2;
		if (scldiv < 256) {
			fsl_lpspi->config.prescale = prescale;
			break;
		}
	}

	if (prescale == 8 && scldiv >= 256)
		return -EINVAL;

	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
	       fsl_lpspi->base + IMX7ULP_CCR);

	dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
		perclk_rate, config.speed_hz, prescale, scldiv);

	return 0;
}

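/*
 * Configure both eDMA channels: the slave bus width follows the configured
 * word size (1/2/4 bytes), the device addresses point at the TDR/RDR FIFO
 * registers, and a burst length of one word is used in each direction.
 */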
static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_tx, &tx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_rx, &rx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;
	int ret;

	if (!fsl_lpspi->is_slave) {
		ret = fsl_lpspi_set_bitrate(fsl_lpspi);
		if (ret)
			return ret;
	}

	fsl_lpspi_set_watermark(fsl_lpspi);

	if (!fsl_lpspi->is_slave)
		temp = CFGR1_MASTER;
	else
		temp = CFGR1_PINCFG;
	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
		temp |= CFGR1_PCSPOL;
	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
	temp |= CR_RRF | CR_RTF | CR_MEN;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	temp = 0;
	if (fsl_lpspi->usedma)
		temp = DER_TDDE | DER_RDDE;
	writel(temp, fsl_lpspi->base + IMX7ULP_DER);

	return 0;
}

static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(spi->controller);

	if (t == NULL)
		return -EINVAL;

	fsl_lpspi->config.mode = spi->mode;
	fsl_lpspi->config.bpw = t->bits_per_word;
	fsl_lpspi->config.speed_hz = t->speed_hz;
	fsl_lpspi->config.chip_select = spi->chip_select;

	if (!fsl_lpspi->config.speed_hz)
		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
	if (!fsl_lpspi->config.bpw)
		fsl_lpspi->config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (fsl_lpspi->config.bpw <= 8) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
	} else if (fsl_lpspi->config.bpw <= 16) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
	} else {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
	}

	if (t->len <= fsl_lpspi->txfifosize)
		fsl_lpspi->watermark = t->len;
	else
		fsl_lpspi->watermark = fsl_lpspi->txfifosize;

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = 1;
	else
		fsl_lpspi->usedma = 0;

	return fsl_lpspi_config(fsl_lpspi);
}

static int fsl_lpspi_slave_abort(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->slave_aborted = true;
	if (!fsl_lpspi->usedma) {
		complete(&fsl_lpspi->xfer_done);
	} else {
		complete(&fsl_lpspi->dma_tx_completion);
		complete(&fsl_lpspi->dma_rx_completion);
	}

	return 0;
}

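/*
 * In slave mode the wait is interruptible, so a signal or a call to
 * fsl_lpspi_slave_abort() terminates it; in master mode a one-second
 * timeout bounds the wait instead.
 */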
static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	if (fsl_lpspi->is_slave) {
		if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
		    fsl_lpspi->slave_aborted) {
			dev_dbg(fsl_lpspi->dev, "interrupted\n");
			return -EINTR;
		}
	} else {
		if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
			dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma) {
		/* Disable all interrupts */
		fsl_lpspi_intctrl(fsl_lpspi, 0);
	}

	/* W1C for all flags in SR */
	temp = 0x3F << 8;
	writel(temp, fsl_lpspi->base + IMX7ULP_SR);

	/* Clear FIFO and disable module */
	temp = CR_RRF | CR_RTF;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	return 0;
}

static void fsl_lpspi_dma_rx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_rx_completion);
}

static void fsl_lpspi_dma_tx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_tx_completion);
}

static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
				       int size)
{
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

	/* Add extra second for scheduler related activities */
	timeout += 1;

	/* Double calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}

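/*
 * Run one transfer through the eDMA channels.  The RX descriptor is
 * prepared and issued before the TX descriptor so no incoming word can be
 * lost, then both completions are awaited: with a calculated timeout in
 * master mode, interruptibly (abortable) in slave mode.  Any failure tears
 * down both channels and resets the controller.
 */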
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
				  struct fsl_lpspi_data *fsl_lpspi,
				  struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	int ret;

	ret = fsl_lpspi_dma_configure(controller);
	if (ret)
		return ret;

	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = fsl_lpspi_dma_rx_callback;
	desc_rx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_rx);
	reinit_completion(&fsl_lpspi->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(controller->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = fsl_lpspi_dma_tx_callback;
	desc_tx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_tx);
	reinit_completion(&fsl_lpspi->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	fsl_lpspi->slave_aborted = false;

	if (!fsl_lpspi->is_slave) {
		transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
							       transfer->len);

		/* Wait for eDMA to finish the data transfer. */
		timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
						      transfer_timeout);
		if (!timeout) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}

		timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
						      transfer_timeout);
		if (!timeout) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}
	} else {
		if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
		    fsl_lpspi->slave_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA TX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}

		if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
		    fsl_lpspi->slave_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA RX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}
	}

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
	if (controller->dma_rx) {
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}

	if (controller->dma_tx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}

static int fsl_lpspi_dma_init(struct device *dev,
			      struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_controller *controller)
{
	int ret;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&fsl_lpspi->dma_rx_completion);
	init_completion(&fsl_lpspi->dma_tx_completion);
	controller->can_dma = fsl_lpspi_can_dma;
	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

	return 0;
err:
	fsl_lpspi_dma_exit(controller);
	return ret;
}

static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->tx_buf = t->tx_buf;
	fsl_lpspi->rx_buf = t->rx_buf;
	fsl_lpspi->remain = t->len;

	reinit_completion(&fsl_lpspi->xfer_done);
	fsl_lpspi->slave_aborted = false;

	fsl_lpspi_write_tx_fifo(fsl_lpspi);

	ret = fsl_lpspi_wait_for_completion(controller);
	if (ret)
		return ret;

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static int fsl_lpspi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->is_first_byte = true;
	ret = fsl_lpspi_setup_transfer(controller, spi, t);
	if (ret < 0)
		return ret;

	fsl_lpspi_set_cmd(fsl_lpspi);
	fsl_lpspi->is_first_byte = false;

	if (fsl_lpspi->usedma)
		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
	else
		ret = fsl_lpspi_pio_transfer(controller, t);
	if (ret < 0)
		return ret;

	return 0;
}

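/*
 * Interrupt handler: mask all LPSPI interrupts, drain the RX FIFO, then
 * either refill the TX FIFO (TDF with TDIE enabled), re-arm the frame
 * completion interrupt while the transfer is still busy, or signal
 * xfer_done once the frame completion flag (FCF) is seen.
 */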
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
	u32 temp_SR, temp_IER;
	struct fsl_lpspi_data *fsl_lpspi = dev_id;

	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
	fsl_lpspi_intctrl(fsl_lpspi, 0);
	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

	fsl_lpspi_read_rx_fifo(fsl_lpspi);

	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
		fsl_lpspi_write_tx_fifo(fsl_lpspi);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_MBF ||
	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		complete(&fsl_lpspi->xfer_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;
	int ret;

	fsl_lpspi = spi_controller_get_devdata(controller);

	ret = clk_prepare_enable(fsl_lpspi->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
	if (ret) {
		clk_disable_unprepare(fsl_lpspi->clk_per);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	clk_disable_unprepare(fsl_lpspi->clk_per);
	clk_disable_unprepare(fsl_lpspi->clk_ipg);

	return 0;
}
#endif

static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
	struct device *dev = fsl_lpspi->dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);

	return 0;
}

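/*
 * Probe: allocate a master or slave controller depending on the "spi-slave"
 * DT property, claim any chip-select GPIOs, map the register block, request
 * the IRQ and clocks, enable runtime PM, read the FIFO sizes from the PARAM
 * register, and set up DMA when channels are available, falling back to PIO
 * otherwise.
 */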
static int fsl_lpspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_lpspi_data *fsl_lpspi;
	struct spi_controller *controller;
	struct spi_imx_master *lpspi_platform_info =
		dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret, irq;
	u32 temp;
	bool is_slave;

	is_slave = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
	if (is_slave)
		controller = spi_alloc_slave(&pdev->dev,
					     sizeof(struct fsl_lpspi_data));
	else
		controller = spi_alloc_master(&pdev->dev,
					      sizeof(struct fsl_lpspi_data));

	if (!controller)
		return -ENOMEM;

	platform_set_drvdata(pdev, controller);

	fsl_lpspi = spi_controller_get_devdata(controller);
	fsl_lpspi->dev = &pdev->dev;
	fsl_lpspi->is_slave = is_slave;

	if (!fsl_lpspi->is_slave) {
		for (i = 0; i < controller->num_chipselect; i++) {
			int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);

			if (!gpio_is_valid(cs_gpio) && lpspi_platform_info)
				cs_gpio = lpspi_platform_info->chipselect[i];

			fsl_lpspi->chipselect[i] = cs_gpio;
			if (!gpio_is_valid(cs_gpio))
				continue;

			ret = devm_gpio_request(&pdev->dev,
						fsl_lpspi->chipselect[i],
						DRIVER_NAME);
			if (ret) {
				dev_err(&pdev->dev, "can't get cs gpios\n");
				goto out_controller_put;
			}
		}
		controller->cs_gpios = fsl_lpspi->chipselect;
		controller->prepare_message = fsl_lpspi_prepare_message;
	}

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	controller->transfer_one = fsl_lpspi_transfer_one;
	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	controller->dev.of_node = pdev->dev.of_node;
	controller->bus_num = pdev->id;
	controller->slave_abort = fsl_lpspi_slave_abort;

	init_completion(&fsl_lpspi->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_lpspi->base)) {
		ret = PTR_ERR(fsl_lpspi->base);
		goto out_controller_put;
	}
	fsl_lpspi->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_controller_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
			       dev_name(&pdev->dev), fsl_lpspi);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_controller_put;
	}

	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(fsl_lpspi->clk_per)) {
		ret = PTR_ERR(fsl_lpspi->clk_per);
		goto out_controller_put;
	}

	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fsl_lpspi->clk_ipg)) {
		ret = PTR_ERR(fsl_lpspi->clk_ipg);
		goto out_controller_put;
	}

	/* enable the clock */
	ret = fsl_lpspi_init_rpm(fsl_lpspi);
	if (ret)
		goto out_controller_put;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		goto out_controller_put;
	}

	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);

	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
	if (ret == -EPROBE_DEFER)
		goto out_controller_put;

	if (ret < 0)
		dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);

	ret = devm_spi_register_controller(&pdev->dev, controller);
	if (ret < 0) {
		dev_err(&pdev->dev, "spi_register_controller error.\n");
		goto out_controller_put;
	}

	return 0;

out_controller_put:
	spi_controller_put(controller);

	return ret;
}

static int fsl_lpspi_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_disable(fsl_lpspi->dev);

	spi_master_put(controller);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int fsl_lpspi_suspend(struct device *dev)
{
	int ret;

	pinctrl_pm_select_sleep_state(dev);
	ret = pm_runtime_force_suspend(dev);
	return ret;
}

static int fsl_lpspi_resume(struct device *dev)
{
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "Error in resume: %d\n", ret);
		return ret;
	}

	pinctrl_pm_select_default_state(dev);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops fsl_lpspi_pm_ops = {
	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
			   fsl_lpspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};

static struct platform_driver fsl_lpspi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fsl_lpspi_dt_ids,
		.pm = &fsl_lpspi_pm_ops,
	},
	.probe = fsl_lpspi_probe,
	.remove = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");