/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)

#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @wait: wait here until given transfer is completed
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 */
struct ep93xx_spi {
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	unsigned long			min_rate;
	unsigned long			max_rate;
	struct completion		wait;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @ops: private chip operations
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
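/*
 * Usage sketch (hypothetical board code, not part of this driver): the
 * controller is instantiated through platform data and the attached devices
 * are described with spi_board_info, assuming the arch helper
 * ep93xx_register_spi() is available. All values below are made up for
 * illustration; a real board supplies its own.
 *
 *	static struct ep93xx_spi_info board_spi_info = {
 *		.num_chipselect	= 1,
 *		.use_dma	= true,
 *	};
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "spidev",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	ep93xx_register_spi(&board_spi_info, board_spi_devices,
 *			    ARRAY_SIZE(board_spi_devices));
 */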
static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
				u16 reg, u8 value)
{
	writeb(value, espi->regs_base + reg);
}

static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return readb(spi->regs_base + reg);
}

static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
				 u16 reg, u16 value)
{
	writew(value, espi->regs_base + reg);
}

static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @rate: desired SPI output clock rate
 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
 * @div_scr: pointer to return the scr divider
 *
 * Returns %0 on success, or %-EINVAL if no divisor pair can produce a rate
 * at or below @rate.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    unsigned long rate,
				    u8 *div_cpsr, u8 *div_scr)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate stays within the range supported
	 * by the controller. Note that the minimum rate is already checked in
	 * ep93xx_spi_transfer_one_message().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number starting from 2, and scr can be any
	 * number between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				*div_scr = (u8)scr;
				*div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
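/*
 * Worked example for the search above (illustrative numbers only): with a
 * 14.7456 MHz SSP clock and a requested rate of 1 MHz, the first divisor
 * pair that satisfies the condition is cpsr = 2, scr = 7:
 *
 *	14745600 / (2 * (7 + 1)) = 921600 Hz <= 1000000 Hz
 *
 * so the transfer runs at 921.6 kHz, the fastest supported rate not above
 * the request.
 */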
static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);

			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	ep93xx_spi_cs_control(spi, false);
	return 0;
}

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}
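/*
 * Example ep93xx_spi_chip_ops implementation (hypothetical board code,
 * assuming a board-chosen GPIO drives the chipselect; the GPIO line below is
 * made up for illustration). The struct is passed to this driver through
 * spi_board_info.controller_data:
 *
 *	static int board_cs_setup(struct spi_device *spi)
 *	{
 *		return gpio_request_one(EP93XX_GPIO_LINE_EGPIO9,
 *					GPIOF_OUT_INIT_HIGH, spi->modalias);
 *	}
 *
 *	static void board_cs_cleanup(struct spi_device *spi)
 *	{
 *		gpio_free(EP93XX_GPIO_LINE_EGPIO9);
 *	}
 *
 *	static void board_cs_control(struct spi_device *spi, int value)
 *	{
 *		gpio_set_value(EP93XX_GPIO_LINE_EGPIO9, value);
 *	}
 *
 *	static struct ep93xx_spi_chip_ops board_chip_ops = {
 *		.setup		= board_cs_setup,
 *		.cleanup	= board_cs_cleanup,
 *		.cs_control	= board_cs_control,
 *	};
 */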
/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 * @speed_hz: transfer speed
 * @bits_per_word: transfer bits_per_word
 */
static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				 const struct ep93xx_spi_chip *chip,
				 u32 speed_hz, u8 bits_per_word)
{
	u8 dss = bits_per_word_to_dss(bits_per_word);
	u8 div_cpsr = 0;
	u8 div_scr = 0;
	u16 cr0;
	int err;

	err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
	if (err)
		return err;

	cr0 = div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, div_cpsr, div_scr, dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);

	return 0;
}
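/*
 * For illustration, the CR0 value composed above for an 8-bit, SPI mode 3
 * (SPI_CPOL | SPI_CPHA) device with scr = 7 would be:
 *
 *	cr0 = (7 << 8) | (3 << 6) | (8 - 1) = 0x7c7
 */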
static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should
 * be full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}
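/*
 * For example, a 20-byte transfer with 8 bits per word proceeds as follows:
 * the priming ep93xx_spi_read_write() call above queues SPI_FIFO_SIZE (8)
 * frames into the TX FIFO; each subsequent interrupt drains whatever the RX
 * FIFO holds and tops the TX FIFO back up, until espi->rx reaches 20 and the
 * interrupt handler completes &espi->wait.
 */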
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (t->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
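/*
 * Chunking example for the loop above (assuming PAGE_SIZE = 4096): a t->len
 * of 10000 bytes yields nents = 3 with sg entry lengths of 4096, 4096 and
 * 1808 bytes; for a TX-only transfer every RX entry points at the shared
 * espi->zeropage instead of a client buffer.
 */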
/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_transfer_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
	int err;

	msg->state = t;

	err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
	if (err) {
		dev_err(&espi->pdev->dev,
			"failed to setup chip for transfer\n");
		msg->status = err;
		return;
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which fit into
	 * the FIFO and can be completed with a single interrupt, so in these
	 * cases we use PIO instead.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * If the protocol driver asks us to drop the
			 * chipselect briefly, we let the scheduler handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
}

/**
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case
 * of failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We track the FIFO level explicitly. This way we don't have to check
	 * TX FIFO status using the %SSPSR_TNF bit, which may cause RX FIFO
	 * overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Assert the chipselect.
	 */
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

static int ep93xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *t;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	espi->current_msg = msg;
	ep93xx_spi_process_message(espi, msg);
	espi->current_msg = NULL;

	spi_finalize_current_message(master);

	return 0;
}
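/*
 * Client-side sketch (hypothetical protocol driver code): a message like the
 * one below is queued by the SPI core and eventually handed to
 * ep93xx_spi_transfer_one_message(). Chipselect stays asserted across both
 * transfers because cs_change is left clear.
 *
 *	struct spi_transfer xfers[] = {
 *		{ .tx_buf = cmd,  .len = sizeof(cmd),  },
 *		{ .rx_buf = resp, .len = sizeof(resp), },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfers[0], &msg);
 *	spi_message_add_tail(&xfers[1], &msg);
 *	err = spi_sync(spi, &msg);
 */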
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got a ROR (receive overrun) interrupt we know that something
	 * is wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In the normal case there is still some processing
			 * left for the current transfer. Wait for the next
			 * interrupt.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}
static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resources\n");
		return -EBUSY;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master)
		return -ENOMEM;

	master->setup = ep93xx_spi_setup;
	master->transfer_one_message = ep93xx_spi_transfer_one_message;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	init_completion(&espi->wait);

	/*
	 * Calculate the minimum and maximum supported clock rates for the
	 * controller: the fastest rate uses the smallest divisor (cpsr = 2,
	 * scr = 0), the slowest the largest (cpsr = 254, scr = 255).
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->sspdr_phys = res->start + SSPDR;

	espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->regs_base)) {
		error = PTR_ERR(espi->regs_base);
		goto fail_release_master;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				 0, "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_release_master;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = devm_spi_register_master(&pdev->dev, master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_dma;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_release_master:
	spi_master_put(master);

	return error;
}

static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	ep93xx_spi_release_dma(espi);

	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");