/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, driver accepts transfer requests from protocol drivers.
 * @current_msg is used to hold pointer to the message that is currently
 * processed. If @current_msg is %NULL, it means that no processing is going
 * on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
        spinlock_t lock;
        const struct platform_device *pdev;
        struct clk *clk;
        void __iomem *regs_base;
        unsigned long sspdr_phys;
        unsigned long min_rate;
        unsigned long max_rate;
        bool running;
        struct workqueue_struct *wq;
        struct work_struct msg_work;
        struct completion wait;
        struct list_head msg_queue;
        struct spi_message *current_msg;
        size_t tx;
        size_t rx;
        size_t fifo_level;
        struct dma_chan *dma_rx;
        struct dma_chan *dma_tx;
        struct ep93xx_dma_data dma_rx_data;
        struct ep93xx_dma_data dma_tx_data;
        struct sg_table rx_sgt;
        struct sg_table tx_sgt;
        void *zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @ops: private chip operations
 */
struct ep93xx_spi_chip {
        const struct spi_device *spi;
        struct ep93xx_spi_chip_ops *ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
                                u16 reg, u8 value)
{
        writeb(value, espi->regs_base + reg);
}

static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
        return readb(spi->regs_base + reg);
}

static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
                                 u16 reg, u16 value)
{
        writew(value, espi->regs_base + reg);
}

static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
        return readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
        u8 regval;
        int err;

        err = clk_enable(espi->clk);
        if (err)
                return err;

        regval = ep93xx_spi_read_u8(espi, SSPCR1);
        regval |= SSPCR1_SSE;
        ep93xx_spi_write_u8(espi, SSPCR1, regval);

        return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
        u8 regval;

        regval = ep93xx_spi_read_u8(espi, SSPCR1);
        regval &= ~SSPCR1_SSE;
        ep93xx_spi_write_u8(espi, SSPCR1, regval);

        clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
        u8 regval;

        regval = ep93xx_spi_read_u8(espi, SSPCR1);
        regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
        ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
        u8 regval;

        regval = ep93xx_spi_read_u8(espi, SSPCR1);
        regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
        ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @rate: desired SPI output clock rate
 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
 * @div_scr: pointer to return the scr divider
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
                                    unsigned long rate,
                                    u8 *div_cpsr, u8 *div_scr)
{
        unsigned long spi_clk_rate = clk_get_rate(espi->clk);
        int cpsr, scr;
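
        /*
         * A short worked example of the search below (the clock value is
         * illustrative only, not taken from a specific board): with
         * spi_clk_rate = 14745600 Hz and rate = 1000000 Hz, the first match
         * is cpsr = 2, scr = 7, i.e. 14745600 / (2 * 8) = 921600 Hz, the
         * fastest output rate that does not exceed the requested one.
         */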

        /*
         * Make sure that max value is between values supported by the
         * controller. Note that minimum value is already checked in
         * ep93xx_spi_transfer().
         */
        rate = clamp(rate, espi->min_rate, espi->max_rate);

        /*
         * Calculate divisors so that we can get speed according to the
         * following formula:
         *      rate = spi_clock_rate / (cpsr * (1 + scr))
         *
         * cpsr must be an even number and starts from 2, scr can be any
         * number between 0 and 255.
         */
        for (cpsr = 2; cpsr <= 254; cpsr += 2) {
                for (scr = 0; scr <= 255; scr++) {
                        if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
                                *div_scr = (u8)scr;
                                *div_cpsr = (u8)cpsr;
                                return 0;
                        }
                }
        }

        return -EINVAL;
}

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
        struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
        int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

        if (chip->ops && chip->ops->cs_control)
                chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
        struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
        struct ep93xx_spi_chip *chip;

        chip = spi_get_ctldata(spi);
        if (!chip) {
                dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
                        spi->modalias);

                chip = kzalloc(sizeof(*chip), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;

                chip->spi = spi;
                chip->ops = spi->controller_data;

                if (chip->ops && chip->ops->setup) {
                        int ret = chip->ops->setup(spi);
                        if (ret) {
                                kfree(chip);
                                return ret;
                        }
                }

                spi_set_ctldata(spi, chip);
        }

        ep93xx_spi_cs_control(spi, false);
        return 0;
}

/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to
 * transfer a new message. It simply puts the message in the queue and
 * schedules the workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
        struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
        struct spi_transfer *t;
        unsigned long flags;

        if (!msg || !msg->complete)
                return -EINVAL;

        /* first validate each transfer */
        list_for_each_entry(t, &msg->transfers, transfer_list) {
                if (t->speed_hz && t->speed_hz < espi->min_rate)
                        return -EINVAL;
        }

        /*
         * Now that we own the message, let's initialize it so that it is
         * suitable for us. We use @msg->status to signal whether there was
         * error in transfer and @msg->state is used to hold pointer to the
         * current transfer (or %NULL if no active current transfer).
         */
        msg->state = NULL;
        msg->status = 0;
        msg->actual_length = 0;

        spin_lock_irqsave(&espi->lock, flags);
        if (!espi->running) {
                spin_unlock_irqrestore(&espi->lock, flags);
                return -ESHUTDOWN;
        }
        list_add_tail(&msg->queue, &espi->msg_queue);
        queue_work(espi->wq, &espi->msg_work);
        spin_unlock_irqrestore(&espi->lock, flags);

        return 0;
}

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
        struct ep93xx_spi_chip *chip;

        chip = spi_get_ctldata(spi);
        if (chip) {
                if (chip->ops && chip->ops->cleanup)
                        chip->ops->cleanup(spi);
                spi_set_ctldata(spi, NULL);
                kfree(chip);
        }
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 * @speed_hz: transfer speed
 * @bits_per_word: transfer bits_per_word
 */
static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
                                 const struct ep93xx_spi_chip *chip,
                                 u32 speed_hz, u8 bits_per_word)
{
        u8 dss = bits_per_word_to_dss(bits_per_word);
        u8 div_cpsr = 0;
        u8 div_scr = 0;
        u16 cr0;
        int err;

        err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
        if (err)
                return err;

        cr0 = div_scr << SSPCR0_SCR_SHIFT;
        cr0 |= (chip->spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
        cr0 |= dss;

        dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
                chip->spi->mode, div_cpsr, div_scr, dss);
        dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

        ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
        ep93xx_spi_write_u16(espi, SSPCR0, cr0);

        return 0;
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
        if (t->bits_per_word > 8) {
                u16 tx_val = 0;

                if (t->tx_buf)
                        tx_val = ((u16 *)t->tx_buf)[espi->tx];
                ep93xx_spi_write_u16(espi, SSPDR, tx_val);
                espi->tx += sizeof(tx_val);
        } else {
                u8 tx_val = 0;

                if (t->tx_buf)
                        tx_val = ((u8 *)t->tx_buf)[espi->tx];
                ep93xx_spi_write_u8(espi, SSPDR, tx_val);
                espi->tx += sizeof(tx_val);
        }
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
        if (t->bits_per_word > 8) {
                u16 rx_val;

                rx_val = ep93xx_spi_read_u16(espi, SSPDR);
                if (t->rx_buf)
                        ((u16 *)t->rx_buf)[espi->rx] = rx_val;
                espi->rx += sizeof(rx_val);
        } else {
                u8 rx_val;

                rx_val = ep93xx_spi_read_u8(espi, SSPDR);
                if (t->rx_buf)
                        ((u8 *)t->rx_buf)[espi->rx] = rx_val;
                espi->rx += sizeof(rx_val);
        }
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should
 * be full.
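 *
 * Draining the RX FIFO before queueing new frames keeps @espi->fifo_level at
 * or below %SPI_FIFO_SIZE, which is what avoids RX overruns without polling
 * the %SSPSR_TNF bit (see also ep93xx_spi_process_message()).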
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
        struct spi_message *msg = espi->current_msg;
        struct spi_transfer *t = msg->state;

        /* read as long as RX FIFO has frames in it */
        while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
                ep93xx_do_read(espi, t);
                espi->fifo_level--;
        }

        /* write as long as TX FIFO has room */
        while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
                ep93xx_do_write(espi, t);
                espi->fifo_level++;
        }

        if (espi->rx == t->len)
                return 0;

        return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
        /*
         * Now everything is set up for the current transfer. We prime the TX
         * FIFO, enable interrupts, and wait for the transfer to complete.
         */
        if (ep93xx_spi_read_write(espi)) {
                ep93xx_spi_enable_interrupts(espi);
                wait_for_completion(&espi->wait);
        }
}

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
        struct spi_transfer *t = espi->current_msg->state;
        struct dma_async_tx_descriptor *txd;
        enum dma_slave_buswidth buswidth;
        struct dma_slave_config conf;
        struct scatterlist *sg;
        struct sg_table *sgt;
        struct dma_chan *chan;
        const void *buf, *pbuf;
        size_t len = t->len;
        int i, ret, nents;

        if (t->bits_per_word > 8)
                buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
        else
                buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

        memset(&conf, 0, sizeof(conf));
        conf.direction = dir;

        if (dir == DMA_DEV_TO_MEM) {
                chan = espi->dma_rx;
                buf = t->rx_buf;
                sgt = &espi->rx_sgt;

                conf.src_addr = espi->sspdr_phys;
                conf.src_addr_width = buswidth;
        } else {
                chan = espi->dma_tx;
                buf = t->tx_buf;
                sgt = &espi->tx_sgt;

                conf.dst_addr = espi->sspdr_phys;
                conf.dst_addr_width = buswidth;
        }

        ret = dmaengine_slave_config(chan, &conf);
        if (ret)
                return ERR_PTR(ret);

        /*
         * We need to split the transfer into PAGE_SIZE'd chunks. This is
         * because we are using @espi->zeropage to provide a zero RX buffer
         * for the TX transfers and we have only allocated one page for that.
         *
         * For performance reasons we allocate a new sg_table only when
         * needed. Otherwise we will re-use the current one. Eventually the
         * last sg_table is released in ep93xx_spi_release_dma().
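         *
         * For example, a 10 KiB transfer with 4 KiB pages (the page size
         * here is only illustrative) needs DIV_ROUND_UP(10240, 4096) = 3
         * scatterlist entries of 4096, 4096 and 2048 bytes.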
         */

        nents = DIV_ROUND_UP(len, PAGE_SIZE);
        if (nents != sgt->nents) {
                sg_free_table(sgt);

                ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
                if (ret)
                        return ERR_PTR(ret);
        }

        pbuf = buf;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = min_t(size_t, len, PAGE_SIZE);

                if (buf) {
                        sg_set_page(sg, virt_to_page(pbuf), bytes,
                                    offset_in_page(pbuf));
                } else {
                        sg_set_page(sg, virt_to_page(espi->zeropage),
                                    bytes, 0);
                }

                pbuf += bytes;
                len -= bytes;
        }

        if (WARN_ON(len)) {
                dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
                return ERR_PTR(-EINVAL);
        }

        nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
        if (!nents)
                return ERR_PTR(-ENOMEM);

        txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
        if (!txd) {
                dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
                return ERR_PTR(-ENOMEM);
        }
        return txd;
}

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
                                  enum dma_transfer_direction dir)
{
        struct dma_chan *chan;
        struct sg_table *sgt;

        if (dir == DMA_DEV_TO_MEM) {
                chan = espi->dma_rx;
                sgt = &espi->rx_sgt;
        } else {
                chan = espi->dma_tx;
                sgt = &espi->tx_sgt;
        }

        dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
        complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
        struct spi_message *msg = espi->current_msg;
        struct dma_async_tx_descriptor *rxd, *txd;

        rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
        if (IS_ERR(rxd)) {
                dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
                msg->status = PTR_ERR(rxd);
                return;
        }

        txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
        if (IS_ERR(txd)) {
                ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
                dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
                msg->status = PTR_ERR(txd);
                return;
        }

        /* We are ready when RX is done */
        rxd->callback = ep93xx_spi_dma_callback;
        rxd->callback_param = &espi->wait;

        /* Now submit both descriptors and wait while they finish */
        dmaengine_submit(rxd);
        dmaengine_submit(txd);

        dma_async_issue_pending(espi->dma_rx);
        dma_async_issue_pending(espi->dma_tx);

        wait_for_completion(&espi->wait);

        ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
        ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
                                        struct spi_message *msg,
                                        struct spi_transfer *t)
{
        struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
        int err;

        msg->state = t;

        err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
        if (err) {
                dev_err(&espi->pdev->dev,
                        "failed to setup chip for transfer\n");
                msg->status = err;
                return;
        }

        espi->rx = 0;
        espi->tx = 0;

        /*
         * There is no point in setting up DMA for transfers which will fit
         * into the FIFO and can be transferred with a single interrupt. So
         * in these cases we use PIO and don't bother with DMA.
         */
        if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
                ep93xx_spi_dma_transfer(espi);
        else
                ep93xx_spi_pio_transfer(espi);

        /*
         * In case of error during transmit, we bail out from processing
         * the message.
         */
        if (msg->status)
                return;

        msg->actual_length += t->len;

        /*
         * After this transfer is finished, perform any possible
         * post-transfer actions requested by the protocol driver.
         */
        if (t->delay_usecs) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(t->delay_usecs));
        }
        if (t->cs_change) {
                if (!list_is_last(&t->transfer_list, &msg->transfers)) {
                        /*
                         * In case the protocol driver is asking us to drop
                         * the chipselect briefly, we let the scheduler handle
                         * any "delay" here.
                         */
                        ep93xx_spi_cs_control(msg->spi, false);
                        cond_resched();
                        ep93xx_spi_cs_control(msg->spi, true);
                }
        }
}

/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers
 * in the message and pass them to ep93xx_spi_process_transfer(). Chipselect
 * is asserted during the whole message (unless per transfer cs_change is
 * set).
 *
 * @msg->status contains %0 in case of success or negative error code in case
 * of failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
                                       struct spi_message *msg)
{
        unsigned long timeout;
        struct spi_transfer *t;
        int err;

        /*
         * Enable the SPI controller and its clock.
         */
        err = ep93xx_spi_enable(espi);
        if (err) {
                dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
                msg->status = err;
                return;
        }

        /*
         * Just to be sure: flush any data from RX FIFO.
         */
        timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
        while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
                if (time_after(jiffies, timeout)) {
                        dev_warn(&espi->pdev->dev,
                                 "timeout while flushing RX FIFO\n");
                        msg->status = -ETIMEDOUT;
                        return;
                }
                ep93xx_spi_read_u16(espi, SSPDR);
        }

        /*
         * We explicitly handle FIFO level. This way we don't have to check TX
         * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
         */
        espi->fifo_level = 0;

        /*
         * Assert the chipselect.
         */
        ep93xx_spi_cs_control(msg->spi, true);

        list_for_each_entry(t, &msg->transfers, transfer_list) {
                ep93xx_spi_process_transfer(espi, msg, t);
                if (msg->status)
                        break;
        }

        /*
         * Now the whole message is transferred (or failed for some reason).
         * We deselect the device and disable the SPI controller.
         */
        ep93xx_spi_cs_control(msg->spi, false);
        ep93xx_spi_disable(espi);
}

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
        struct ep93xx_spi *espi = work_to_espi(work);
        struct spi_message *msg;

        spin_lock_irq(&espi->lock);
        if (!espi->running || espi->current_msg ||
            list_empty(&espi->msg_queue)) {
                spin_unlock_irq(&espi->lock);
                return;
        }
        msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
        list_del_init(&msg->queue);
        espi->current_msg = msg;
        spin_unlock_irq(&espi->lock);

        ep93xx_spi_process_message(espi, msg);

        /*
         * Update the current message and re-schedule ourselves if there are
         * more messages in the queue.
         */
        spin_lock_irq(&espi->lock);
        espi->current_msg = NULL;
        if (espi->running && !list_empty(&espi->msg_queue))
                queue_work(espi->wq, &espi->msg_work);
        spin_unlock_irq(&espi->lock);

        /* notify the protocol driver that we are done with this message */
        msg->complete(msg->context);
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
        struct ep93xx_spi *espi = dev_id;
        u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

        /*
         * If we got ROR (receive overrun) interrupt we know that something is
         * wrong. Just abort the message.
         */
        if (unlikely(irq_status & SSPIIR_RORIS)) {
                /* clear the overrun interrupt */
                ep93xx_spi_write_u8(espi, SSPICR, 0);
                dev_warn(&espi->pdev->dev,
                         "receive overrun, aborting the message\n");
                espi->current_msg->status = -EIO;
        } else {
                /*
                 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
                 * simply execute next data transfer.
                 */
                if (ep93xx_spi_read_write(espi)) {
                        /*
                         * In normal case, there still is some processing left
                         * for current transfer. Let's wait for the next
                         * interrupt then.
                         */
                        return IRQ_HANDLED;
                }
        }

        /*
         * Current transfer is finished, either with error or with success. In
         * any case we disable interrupts and notify the worker to handle
         * any post-processing of the message.
         */
        ep93xx_spi_disable_interrupts(espi);
        complete(&espi->wait);
        return IRQ_HANDLED;
}

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
        if (ep93xx_dma_chan_is_m2p(chan))
                return false;

        chan->private = filter_param;
        return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
        dma_cap_mask_t mask;
        int ret;

        espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
        if (!espi->zeropage)
                return -ENOMEM;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        espi->dma_rx_data.port = EP93XX_DMA_SSP;
        espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
        espi->dma_rx_data.name = "ep93xx-spi-rx";

        espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
                                           &espi->dma_rx_data);
        if (!espi->dma_rx) {
                ret = -ENODEV;
                goto fail_free_page;
        }

        espi->dma_tx_data.port = EP93XX_DMA_SSP;
        espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
        espi->dma_tx_data.name = "ep93xx-spi-tx";

        espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
                                           &espi->dma_tx_data);
        if (!espi->dma_tx) {
                ret = -ENODEV;
                goto fail_release_rx;
        }

        return 0;

fail_release_rx:
        dma_release_channel(espi->dma_rx);
        espi->dma_rx = NULL;
fail_free_page:
        free_page((unsigned long)espi->zeropage);

        return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
        if (espi->dma_rx) {
                dma_release_channel(espi->dma_rx);
                sg_free_table(&espi->rx_sgt);
        }
        if (espi->dma_tx) {
                dma_release_channel(espi->dma_tx);
                sg_free_table(&espi->tx_sgt);
        }

        if (espi->zeropage)
                free_page((unsigned long)espi->zeropage);
}

static int ep93xx_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct ep93xx_spi_info *info;
        struct ep93xx_spi *espi;
        struct resource *res;
        int irq;
        int error;

        info = pdev->dev.platform_data;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq resources\n");
                return -EBUSY;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "unable to get iomem resource\n");
                return -ENODEV;
        }

        master = spi_alloc_master(&pdev->dev, sizeof(*espi));
        if (!master)
                return -ENOMEM;

        master->setup = ep93xx_spi_setup;
        master->transfer = ep93xx_spi_transfer;
        master->cleanup = ep93xx_spi_cleanup;
        master->bus_num = pdev->id;
        master->num_chipselect = info->num_chipselect;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

        platform_set_drvdata(pdev, master);

        espi = spi_master_get_devdata(master);

        espi->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(espi->clk)) {
                dev_err(&pdev->dev, "unable to get spi clock\n");
                error = PTR_ERR(espi->clk);
                goto fail_release_master;
        }

        spin_lock_init(&espi->lock);
        init_completion(&espi->wait);

        /*
         * Calculate maximum and minimum supported clock rates
         * for the controller.
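         *
         * These bounds follow from ep93xx_spi_calc_divisors(): the fastest
         * setting is cpsr = 2 with scr = 0 (rate / 2) and the slowest is
         * cpsr = 254 with scr = 255 (rate / (254 * 256)).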
         */
        espi->max_rate = clk_get_rate(espi->clk) / 2;
        espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
        espi->pdev = pdev;

        espi->sspdr_phys = res->start + SSPDR;

        espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(espi->regs_base)) {
                error = PTR_ERR(espi->regs_base);
                goto fail_release_master;
        }

        error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
                                 0, "ep93xx-spi", espi);
        if (error) {
                dev_err(&pdev->dev, "failed to request irq\n");
                goto fail_release_master;
        }

        if (info->use_dma && ep93xx_spi_setup_dma(espi))
                dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

        espi->wq = create_singlethread_workqueue("ep93xx_spid");
        if (!espi->wq) {
                dev_err(&pdev->dev, "unable to create workqueue\n");
                error = -ENOMEM;
                goto fail_free_dma;
        }
        INIT_WORK(&espi->msg_work, ep93xx_spi_work);
        INIT_LIST_HEAD(&espi->msg_queue);
        espi->running = true;

        /* make sure that the hardware is disabled */
        ep93xx_spi_write_u8(espi, SSPCR1, 0);

        error = spi_register_master(master);
        if (error) {
                dev_err(&pdev->dev, "failed to register SPI master\n");
                goto fail_free_queue;
        }

        dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
                 (unsigned long)res->start, irq);

        return 0;

fail_free_queue:
        destroy_workqueue(espi->wq);
fail_free_dma:
        ep93xx_spi_release_dma(espi);
fail_release_master:
        spi_master_put(master);

        return error;
}

static int ep93xx_spi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct ep93xx_spi *espi = spi_master_get_devdata(master);

        spin_lock_irq(&espi->lock);
        espi->running = false;
        spin_unlock_irq(&espi->lock);

        destroy_workqueue(espi->wq);

        /*
         * Complete remaining messages with %-ESHUTDOWN status.
         */
        spin_lock_irq(&espi->lock);
        while (!list_empty(&espi->msg_queue)) {
                struct spi_message *msg;

                msg = list_first_entry(&espi->msg_queue,
                                       struct spi_message, queue);
                list_del_init(&msg->queue);
                msg->status = -ESHUTDOWN;
                spin_unlock_irq(&espi->lock);
                msg->complete(msg->context);
                spin_lock_irq(&espi->lock);
        }
        spin_unlock_irq(&espi->lock);

        ep93xx_spi_release_dma(espi);

        spi_unregister_master(master);
        return 0;
}

static struct platform_driver ep93xx_spi_driver = {
        .driver         = {
                .name   = "ep93xx-spi",
                .owner  = THIS_MODULE,
        },
        .probe          = ep93xx_spi_probe,
        .remove         = ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");