/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @clk: clock for the controller
 * @mmio: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 */
struct ep93xx_spi {
	struct clk			*clk;
	void __iomem			*mmio;
	unsigned long			sspdr_phys;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @master: SPI master
 * @rate: desired SPI output clock rate
 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
 * @div_scr: pointer to return the scr divider
 */
static int ep93xx_spi_calc_divisors(struct spi_master *master,
				    u32 rate, u8 *div_cpsr, u8 *div_scr)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate is within the range supported by
	 * the controller.
	 */
	rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number starting from 2, and scr can be any
	 * number between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				*div_scr = (u8)scr;
				*div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static int ep93xx_spi_chip_setup(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
	u8 div_cpsr = 0;
	u8 div_scr = 0;
	u16 cr0;
	int err;

	err = ep93xx_spi_calc_divisors(master, xfer->speed_hz,
				       &div_cpsr, &div_scr);
	if (err)
		return err;

	cr0 = div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= dss;

	dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		spi->mode, div_cpsr, div_scr, dss);
	dev_dbg(&master->dev, "setup: cr0 %#x\n", cr0);

	writel(div_cpsr, espi->mmio + SSPCPSR);
	writel(cr0, espi->mmio + SSPCR0);

	return 0;
}

static void ep93xx_do_write(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *xfer = master->cur_msg->state;
	u32 val = 0;

	if (xfer->bits_per_word > 8) {
		if (xfer->tx_buf)
			val = ((u16 *)xfer->tx_buf)[espi->tx];
		espi->tx += 2;
	} else {
		if (xfer->tx_buf)
			val = ((u8 *)xfer->tx_buf)[espi->tx];
		espi->tx += 1;
	}
	writel(val, espi->mmio + SSPDR);
}

static void ep93xx_do_read(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *xfer = master->cur_msg->state;
	u32 val;

	/* Reading SSPDR pops one received frame from the RX FIFO */
	val = readl(espi->mmio + SSPDR);
	if (xfer->bits_per_word > 8) {
		if (xfer->rx_buf)
			((u16 *)xfer->rx_buf)[espi->rx] = val;
		espi->rx += 2;
	} else {
		if (xfer->rx_buf)
			((u8 *)xfer->rx_buf)[espi->rx] = val;
		espi->rx += 1;
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @master: SPI master
 *
 * This function transfers the next bytes (or half-words) to/from the RX/TX
 * FIFOs. If called repeatedly, the whole transfer is eventually completed.
 * Returns %0 when the current transfer is complete, otherwise %-EINPROGRESS.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should
 * be full.
 */
static int ep93xx_spi_read_write(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *xfer = master->cur_msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(master);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
		ep93xx_do_write(master);
		espi->fifo_level++;
	}

	if (espi->rx == xfer->len)
		return 0;

	return -EINPROGRESS;
}

static enum dma_transfer_direction
ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		return DMA_MEM_TO_DEV;
	case DMA_FROM_DEVICE:
		return DMA_DEV_TO_MEM;
	default:
		return DMA_TRANS_NONE;
	}
}

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @master: SPI master
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct spi_master *master,
		       enum dma_data_direction dir)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *xfer = master->cur_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = xfer->len;
	int i, ret, nents;

	if (xfer->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = ep93xx_dma_data_to_trans_dir(dir);

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		buf = xfer->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = xfer->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

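	/* One sg entry per PAGE_SIZE chunk; the tail entry may be shorter */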
	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&master->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
				      DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @master: SPI master
 * @dir: DMA transfer direction
 *
 * Function finishes the DMA transfer. After this the DMA buffer is unmapped.
 */
static void ep93xx_spi_dma_finish(struct spi_master *master,
				  enum dma_data_direction dir)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	struct spi_master *master = callback_param;

	ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
	ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);

	spi_finalize_current_transfer(master);
}

static int ep93xx_spi_dma_transfer(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
	if (IS_ERR(rxd)) {
		dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		return PTR_ERR(rxd);
	}

	txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
		dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		return PTR_ERR(txd);
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = master;

	/* Now submit both descriptors and start DMA */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	/* signal that we need to wait for completion */
	return 1;
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;

	/*
	 * If we got a ROR (receive overrun) interrupt we know that something
	 * is wrong. Just abort the message.
	 */
	if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
		/* clear the overrun interrupt */
		writel(0, espi->mmio + SSPICR);
		dev_warn(&master->dev,
			 "receive overrun, aborting the message\n");
		master->cur_msg->status = -EIO;
	} else {
		/*
		 * The interrupt is either RX (RIS) or TX (TIS). In both cases
		 * we simply execute the next data transfer.
		 */
		if (ep93xx_spi_read_write(master)) {
			/*
			 * In the normal case there is still some processing
			 * left for the current transfer. Wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * The current transfer is finished, either with error or with
	 * success. In any case we disable interrupts and notify the worker to
	 * handle any post-processing of the message.
	 */
	val = readl(espi->mmio + SSPCR1);
	val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

	spi_finalize_current_transfer(master);

	return IRQ_HANDLED;
}

static int ep93xx_spi_transfer_one(struct spi_master *master,
				   struct spi_device *spi,
				   struct spi_transfer *xfer)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;
	int ret;

	ret = ep93xx_spi_chip_setup(master, spi, xfer);
	if (ret) {
		dev_err(&master->dev, "failed to setup chip for transfer\n");
		return ret;
	}

	master->cur_msg->state = xfer;
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which will fit
	 * into the FIFO and can be transferred with a single interrupt. In
	 * these cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
		return ep93xx_spi_dma_transfer(master);

	/* Using PIO so prime the TX FIFO and enable interrupts */
	ep93xx_spi_read_write(master);

	val = readl(espi->mmio + SSPCR1);
	val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

	/* signal that we need to wait for completion */
	return 1;
}

static int ep93xx_spi_prepare_message(struct spi_master *master,
				      struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	unsigned long timeout;

	/*
	 * Just to be sure: flush any data from the RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&master->dev,
				 "timeout while flushing RX FIFO\n");
			return -ETIMEDOUT;
		}
		readl(espi->mmio + SSPDR);
	}

	/*
	 * We handle the FIFO level explicitly. This way we don't have to
	 * check the TX FIFO status using the %SSPSR_TNF bit, which may cause
	 * RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	return 0;
}

static int ep93xx_spi_prepare_hardware(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;
	int ret;

	ret = clk_enable(espi->clk);
	if (ret)
		return ret;

	val = readl(espi->mmio + SSPCR1);
	val |= SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	return 0;
}

static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;

	val = readl(espi->mmio + SSPCR1);
	val &= ~SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	clk_disable(espi->clk);

	return 0;
}

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = dev_get_platdata(&pdev->dev);
	if (!info) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resources\n");
		return -EBUSY;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master)
		return -ENOMEM;

	master->use_gpio_descriptors = true;
	master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
	master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
	master->prepare_message = ep93xx_spi_prepare_message;
	master->transfer_one = ep93xx_spi_transfer_one;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	/*
	 * The SPI core will count the number of GPIO descriptors to figure
	 * out the number of chip selects available on the platform.
	 */
	master->num_chipselect = 0;

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	master->max_speed_hz = clk_get_rate(espi->clk) / 2;
	master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);

	espi->sspdr_phys = res->start + SSPDR;

	espi->mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->mmio)) {
		error = PTR_ERR(espi->mmio);
		goto fail_release_master;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				 0, "ep93xx-spi", master);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_release_master;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	/* make sure that the hardware is disabled */
	writel(0, espi->mmio + SSPCR1);

	error = devm_spi_register_master(&pdev->dev, master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_dma;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_release_master:
	spi_master_put(master);

	return error;
}

static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	ep93xx_spi_release_dma(espi);

	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");
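
/*
 * Illustrative sketch (not part of the driver): the controller is probed from
 * board code rather than device tree, so a board file has to supply the
 * platform data consumed in ep93xx_spi_probe() above. The registration helper
 * and field names below are assumptions based on the ep93xx platform support
 * in arch/arm/mach-ep93xx and may differ in a given kernel tree:
 *
 *	static struct ep93xx_spi_info board_spi_info = {
 *		.use_dma	= true,
 *	};
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "spidev",
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 10 * 1000 * 1000,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	ep93xx_register_spi(&board_spi_info, board_spi_devices,
 *			    ARRAY_SIZE(board_spi_devices));
 */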