1 /* 2 * Driver for Cirrus Logic EP93xx SPI controller. 3 * 4 * Copyright (C) 2010-2011 Mika Westerberg 5 * 6 * Explicit FIFO handling code was inspired by amba-pl022 driver. 7 * 8 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten. 9 * 10 * For more information about the SPI controller see documentation on Cirrus 11 * Logic web site: 12 * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License version 2 as 16 * published by the Free Software Foundation. 17 */ 18 19 #include <linux/io.h> 20 #include <linux/clk.h> 21 #include <linux/err.h> 22 #include <linux/delay.h> 23 #include <linux/device.h> 24 #include <linux/dmaengine.h> 25 #include <linux/bitops.h> 26 #include <linux/interrupt.h> 27 #include <linux/module.h> 28 #include <linux/platform_device.h> 29 #include <linux/sched.h> 30 #include <linux/scatterlist.h> 31 #include <linux/gpio.h> 32 #include <linux/spi/spi.h> 33 34 #include <linux/platform_data/dma-ep93xx.h> 35 #include <linux/platform_data/spi-ep93xx.h> 36 37 #define SSPCR0 0x0000 38 #define SSPCR0_MODE_SHIFT 6 39 #define SSPCR0_SCR_SHIFT 8 40 41 #define SSPCR1 0x0004 42 #define SSPCR1_RIE BIT(0) 43 #define SSPCR1_TIE BIT(1) 44 #define SSPCR1_RORIE BIT(2) 45 #define SSPCR1_LBM BIT(3) 46 #define SSPCR1_SSE BIT(4) 47 #define SSPCR1_MS BIT(5) 48 #define SSPCR1_SOD BIT(6) 49 50 #define SSPDR 0x0008 51 52 #define SSPSR 0x000c 53 #define SSPSR_TFE BIT(0) 54 #define SSPSR_TNF BIT(1) 55 #define SSPSR_RNE BIT(2) 56 #define SSPSR_RFF BIT(3) 57 #define SSPSR_BSY BIT(4) 58 #define SSPCPSR 0x0010 59 60 #define SSPIIR 0x0014 61 #define SSPIIR_RIS BIT(0) 62 #define SSPIIR_TIS BIT(1) 63 #define SSPIIR_RORIS BIT(2) 64 #define SSPICR SSPIIR 65 66 /* timeout in milliseconds */ 67 #define SPI_TIMEOUT 5 68 /* maximum depth of RX/TX FIFO */ 69 #define SPI_FIFO_SIZE 8 70 71 /** 72 * struct ep93xx_spi - EP93xx SPI controller structure 73 * @clk: clock for the controller 74 * @mmio: pointer to ioremap()'d registers 75 * @sspdr_phys: physical address of the SSPDR register 76 * @tx: current byte in transfer to transmit 77 * @rx: current byte in transfer to receive 78 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one 79 * frame decreases this level and sending one frame increases it. 80 * @dma_rx: RX DMA channel 81 * @dma_tx: TX DMA channel 82 * @dma_rx_data: RX parameters passed to the DMA engine 83 * @dma_tx_data: TX parameters passed to the DMA engine 84 * @rx_sgt: sg table for RX transfers 85 * @tx_sgt: sg table for TX transfers 86 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by 87 * the client 88 */ 89 struct ep93xx_spi { 90 struct clk *clk; 91 void __iomem *mmio; 92 unsigned long sspdr_phys; 93 size_t tx; 94 size_t rx; 95 size_t fifo_level; 96 struct dma_chan *dma_rx; 97 struct dma_chan *dma_tx; 98 struct ep93xx_dma_data dma_rx_data; 99 struct ep93xx_dma_data dma_tx_data; 100 struct sg_table rx_sgt; 101 struct sg_table tx_sgt; 102 void *zeropage; 103 }; 104 105 /* converts bits per word to CR0.DSS value */ 106 #define bits_per_word_to_dss(bpw) ((bpw) - 1) 107 108 /** 109 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors 110 * @master: SPI master 111 * @rate: desired SPI output clock rate 112 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider 113 * @div_scr: pointer to return the scr divider 114 */ 115 static int ep93xx_spi_calc_divisors(struct spi_master *master, 116 u32 rate, u8 *div_cpsr, u8 *div_scr) 117 { 118 struct ep93xx_spi *espi = spi_master_get_devdata(master); 119 unsigned long spi_clk_rate = clk_get_rate(espi->clk); 120 int cpsr, scr; 121 122 /* 123 * Make sure that max value is between values supported by the 124 * controller. 125 */ 126 rate = clamp(rate, master->min_speed_hz, master->max_speed_hz); 127 128 /* 129 * Calculate divisors so that we can get speed according the 130 * following formula: 131 * rate = spi_clock_rate / (cpsr * (1 + scr)) 132 * 133 * cpsr must be even number and starts from 2, scr can be any number 134 * between 0 and 255. 135 */ 136 for (cpsr = 2; cpsr <= 254; cpsr += 2) { 137 for (scr = 0; scr <= 255; scr++) { 138 if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { 139 *div_scr = (u8)scr; 140 *div_cpsr = (u8)cpsr; 141 return 0; 142 } 143 } 144 } 145 146 return -EINVAL; 147 } 148 149 static int ep93xx_spi_chip_setup(struct spi_master *master, 150 struct spi_device *spi, 151 struct spi_transfer *xfer) 152 { 153 struct ep93xx_spi *espi = spi_master_get_devdata(master); 154 u8 dss = bits_per_word_to_dss(xfer->bits_per_word); 155 u8 div_cpsr = 0; 156 u8 div_scr = 0; 157 u16 cr0; 158 int err; 159 160 err = ep93xx_spi_calc_divisors(master, xfer->speed_hz, 161 &div_cpsr, &div_scr); 162 if (err) 163 return err; 164 165 cr0 = div_scr << SSPCR0_SCR_SHIFT; 166 cr0 |= (spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT; 167 cr0 |= dss; 168 169 dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", 170 spi->mode, div_cpsr, div_scr, dss); 171 dev_dbg(&master->dev, "setup: cr0 %#x\n", cr0); 172 173 writel(div_cpsr, espi->mmio + SSPCPSR); 174 writel(cr0, espi->mmio + SSPCR0); 175 176 return 0; 177 } 178 179 static void ep93xx_do_write(struct spi_master *master) 180 { 181 struct ep93xx_spi *espi = spi_master_get_devdata(master); 182 struct spi_transfer *xfer = master->cur_msg->state; 183 u32 val = 0; 184 185 if (xfer->bits_per_word > 8) { 186 if (xfer->tx_buf) 187 val = ((u16 *)xfer->tx_buf)[espi->tx]; 188 espi->tx += 2; 189 } else { 190 if (xfer->tx_buf) 191 val = ((u8 *)xfer->tx_buf)[espi->tx]; 192 espi->tx += 1; 193 } 194 writel(val, espi->mmio + SSPDR); 195 } 196 197 static void ep93xx_do_read(struct spi_master *master) 198 { 199 struct ep93xx_spi *espi = spi_master_get_devdata(master); 200 struct spi_transfer *xfer = master->cur_msg->state; 201 u32 val; 202 203 val = readl(espi->mmio + SSPDR); 204 if (xfer->bits_per_word > 8) { 205 if (xfer->rx_buf) 206 ((u16 *)xfer->rx_buf)[espi->rx] = val; 207 espi->rx += 2; 208 } else { 209 if (xfer->rx_buf) 210 ((u8 *)xfer->rx_buf)[espi->rx] = val; 211 espi->rx += 1; 212 } 213 } 214 215 /** 216 * ep93xx_spi_read_write() - perform next RX/TX transfer 217 * @espi: ep93xx SPI controller struct 218 * 219 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If 220 * called several times, the whole transfer will be completed. Returns 221 * %-EINPROGRESS when current transfer was not yet completed otherwise %0. 222 * 223 * When this function is finished, RX FIFO should be empty and TX FIFO should be 224 * full. 225 */ 226 static int ep93xx_spi_read_write(struct spi_master *master) 227 { 228 struct ep93xx_spi *espi = spi_master_get_devdata(master); 229 struct spi_transfer *xfer = master->cur_msg->state; 230 231 /* read as long as RX FIFO has frames in it */ 232 while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) { 233 ep93xx_do_read(master); 234 espi->fifo_level--; 235 } 236 237 /* write as long as TX FIFO has room */ 238 while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) { 239 ep93xx_do_write(master); 240 espi->fifo_level++; 241 } 242 243 if (espi->rx == xfer->len) 244 return 0; 245 246 return -EINPROGRESS; 247 } 248 249 /** 250 * ep93xx_spi_dma_prepare() - prepares a DMA transfer 251 * @master: SPI master 252 * @dir: DMA transfer direction 253 * 254 * Function configures the DMA, maps the buffer and prepares the DMA 255 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR 256 * in case of failure. 257 */ 258 static struct dma_async_tx_descriptor * 259 ep93xx_spi_dma_prepare(struct spi_master *master, 260 enum dma_transfer_direction dir) 261 { 262 struct ep93xx_spi *espi = spi_master_get_devdata(master); 263 struct spi_transfer *xfer = master->cur_msg->state; 264 struct dma_async_tx_descriptor *txd; 265 enum dma_slave_buswidth buswidth; 266 struct dma_slave_config conf; 267 struct scatterlist *sg; 268 struct sg_table *sgt; 269 struct dma_chan *chan; 270 const void *buf, *pbuf; 271 size_t len = xfer->len; 272 int i, ret, nents; 273 274 if (xfer->bits_per_word > 8) 275 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; 276 else 277 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; 278 279 memset(&conf, 0, sizeof(conf)); 280 conf.direction = dir; 281 282 if (dir == DMA_DEV_TO_MEM) { 283 chan = espi->dma_rx; 284 buf = xfer->rx_buf; 285 sgt = &espi->rx_sgt; 286 287 conf.src_addr = espi->sspdr_phys; 288 conf.src_addr_width = buswidth; 289 } else { 290 chan = espi->dma_tx; 291 buf = xfer->tx_buf; 292 sgt = &espi->tx_sgt; 293 294 conf.dst_addr = espi->sspdr_phys; 295 conf.dst_addr_width = buswidth; 296 } 297 298 ret = dmaengine_slave_config(chan, &conf); 299 if (ret) 300 return ERR_PTR(ret); 301 302 /* 303 * We need to split the transfer into PAGE_SIZE'd chunks. This is 304 * because we are using @espi->zeropage to provide a zero RX buffer 305 * for the TX transfers and we have only allocated one page for that. 306 * 307 * For performance reasons we allocate a new sg_table only when 308 * needed. Otherwise we will re-use the current one. Eventually the 309 * last sg_table is released in ep93xx_spi_release_dma(). 310 */ 311 312 nents = DIV_ROUND_UP(len, PAGE_SIZE); 313 if (nents != sgt->nents) { 314 sg_free_table(sgt); 315 316 ret = sg_alloc_table(sgt, nents, GFP_KERNEL); 317 if (ret) 318 return ERR_PTR(ret); 319 } 320 321 pbuf = buf; 322 for_each_sg(sgt->sgl, sg, sgt->nents, i) { 323 size_t bytes = min_t(size_t, len, PAGE_SIZE); 324 325 if (buf) { 326 sg_set_page(sg, virt_to_page(pbuf), bytes, 327 offset_in_page(pbuf)); 328 } else { 329 sg_set_page(sg, virt_to_page(espi->zeropage), 330 bytes, 0); 331 } 332 333 pbuf += bytes; 334 len -= bytes; 335 } 336 337 if (WARN_ON(len)) { 338 dev_warn(&master->dev, "len = %zu expected 0!\n", len); 339 return ERR_PTR(-EINVAL); 340 } 341 342 nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); 343 if (!nents) 344 return ERR_PTR(-ENOMEM); 345 346 txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK); 347 if (!txd) { 348 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); 349 return ERR_PTR(-ENOMEM); 350 } 351 return txd; 352 } 353 354 /** 355 * ep93xx_spi_dma_finish() - finishes with a DMA transfer 356 * @master: SPI master 357 * @dir: DMA transfer direction 358 * 359 * Function finishes with the DMA transfer. After this, the DMA buffer is 360 * unmapped. 361 */ 362 static void ep93xx_spi_dma_finish(struct spi_master *master, 363 enum dma_transfer_direction dir) 364 { 365 struct ep93xx_spi *espi = spi_master_get_devdata(master); 366 struct dma_chan *chan; 367 struct sg_table *sgt; 368 369 if (dir == DMA_DEV_TO_MEM) { 370 chan = espi->dma_rx; 371 sgt = &espi->rx_sgt; 372 } else { 373 chan = espi->dma_tx; 374 sgt = &espi->tx_sgt; 375 } 376 377 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); 378 } 379 380 static void ep93xx_spi_dma_callback(void *callback_param) 381 { 382 struct spi_master *master = callback_param; 383 384 ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV); 385 ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); 386 387 spi_finalize_current_transfer(master); 388 } 389 390 static int ep93xx_spi_dma_transfer(struct spi_master *master) 391 { 392 struct ep93xx_spi *espi = spi_master_get_devdata(master); 393 struct dma_async_tx_descriptor *rxd, *txd; 394 395 rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM); 396 if (IS_ERR(rxd)) { 397 dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); 398 return PTR_ERR(rxd); 399 } 400 401 txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV); 402 if (IS_ERR(txd)) { 403 ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); 404 dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd)); 405 return PTR_ERR(txd); 406 } 407 408 /* We are ready when RX is done */ 409 rxd->callback = ep93xx_spi_dma_callback; 410 rxd->callback_param = master; 411 412 /* Now submit both descriptors and start DMA */ 413 dmaengine_submit(rxd); 414 dmaengine_submit(txd); 415 416 dma_async_issue_pending(espi->dma_rx); 417 dma_async_issue_pending(espi->dma_tx); 418 419 /* signal that we need to wait for completion */ 420 return 1; 421 } 422 423 static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) 424 { 425 struct spi_master *master = dev_id; 426 struct ep93xx_spi *espi = spi_master_get_devdata(master); 427 u32 val; 428 429 /* 430 * If we got ROR (receive overrun) interrupt we know that something is 431 * wrong. Just abort the message. 432 */ 433 if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) { 434 /* clear the overrun interrupt */ 435 writel(0, espi->mmio + SSPICR); 436 dev_warn(&master->dev, 437 "receive overrun, aborting the message\n"); 438 master->cur_msg->status = -EIO; 439 } else { 440 /* 441 * Interrupt is either RX (RIS) or TX (TIS). For both cases we 442 * simply execute next data transfer. 443 */ 444 if (ep93xx_spi_read_write(master)) { 445 /* 446 * In normal case, there still is some processing left 447 * for current transfer. Let's wait for the next 448 * interrupt then. 449 */ 450 return IRQ_HANDLED; 451 } 452 } 453 454 /* 455 * Current transfer is finished, either with error or with success. In 456 * any case we disable interrupts and notify the worker to handle 457 * any post-processing of the message. 458 */ 459 val = readl(espi->mmio + SSPCR1); 460 val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); 461 writel(val, espi->mmio + SSPCR1); 462 463 spi_finalize_current_transfer(master); 464 465 return IRQ_HANDLED; 466 } 467 468 static int ep93xx_spi_transfer_one(struct spi_master *master, 469 struct spi_device *spi, 470 struct spi_transfer *xfer) 471 { 472 struct ep93xx_spi *espi = spi_master_get_devdata(master); 473 u32 val; 474 int ret; 475 476 ret = ep93xx_spi_chip_setup(master, spi, xfer); 477 if (ret) { 478 dev_err(&master->dev, "failed to setup chip for transfer\n"); 479 return ret; 480 } 481 482 master->cur_msg->state = xfer; 483 espi->rx = 0; 484 espi->tx = 0; 485 486 /* 487 * There is no point of setting up DMA for the transfers which will 488 * fit into the FIFO and can be transferred with a single interrupt. 489 * So in these cases we will be using PIO and don't bother for DMA. 490 */ 491 if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE) 492 return ep93xx_spi_dma_transfer(master); 493 494 /* Using PIO so prime the TX FIFO and enable interrupts */ 495 ep93xx_spi_read_write(master); 496 497 val = readl(espi->mmio + SSPCR1); 498 val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); 499 writel(val, espi->mmio + SSPCR1); 500 501 /* signal that we need to wait for completion */ 502 return 1; 503 } 504 505 static int ep93xx_spi_prepare_message(struct spi_master *master, 506 struct spi_message *msg) 507 { 508 struct ep93xx_spi *espi = spi_master_get_devdata(master); 509 unsigned long timeout; 510 511 /* 512 * Just to be sure: flush any data from RX FIFO. 513 */ 514 timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); 515 while (readl(espi->mmio + SSPSR) & SSPSR_RNE) { 516 if (time_after(jiffies, timeout)) { 517 dev_warn(&master->dev, 518 "timeout while flushing RX FIFO\n"); 519 return -ETIMEDOUT; 520 } 521 readl(espi->mmio + SSPDR); 522 } 523 524 /* 525 * We explicitly handle FIFO level. This way we don't have to check TX 526 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. 527 */ 528 espi->fifo_level = 0; 529 530 return 0; 531 } 532 533 static int ep93xx_spi_prepare_hardware(struct spi_master *master) 534 { 535 struct ep93xx_spi *espi = spi_master_get_devdata(master); 536 u32 val; 537 int ret; 538 539 ret = clk_enable(espi->clk); 540 if (ret) 541 return ret; 542 543 val = readl(espi->mmio + SSPCR1); 544 val |= SSPCR1_SSE; 545 writel(val, espi->mmio + SSPCR1); 546 547 return 0; 548 } 549 550 static int ep93xx_spi_unprepare_hardware(struct spi_master *master) 551 { 552 struct ep93xx_spi *espi = spi_master_get_devdata(master); 553 u32 val; 554 555 val = readl(espi->mmio + SSPCR1); 556 val &= ~SSPCR1_SSE; 557 writel(val, espi->mmio + SSPCR1); 558 559 clk_disable(espi->clk); 560 561 return 0; 562 } 563 564 static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param) 565 { 566 if (ep93xx_dma_chan_is_m2p(chan)) 567 return false; 568 569 chan->private = filter_param; 570 return true; 571 } 572 573 static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) 574 { 575 dma_cap_mask_t mask; 576 int ret; 577 578 espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL); 579 if (!espi->zeropage) 580 return -ENOMEM; 581 582 dma_cap_zero(mask); 583 dma_cap_set(DMA_SLAVE, mask); 584 585 espi->dma_rx_data.port = EP93XX_DMA_SSP; 586 espi->dma_rx_data.direction = DMA_DEV_TO_MEM; 587 espi->dma_rx_data.name = "ep93xx-spi-rx"; 588 589 espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, 590 &espi->dma_rx_data); 591 if (!espi->dma_rx) { 592 ret = -ENODEV; 593 goto fail_free_page; 594 } 595 596 espi->dma_tx_data.port = EP93XX_DMA_SSP; 597 espi->dma_tx_data.direction = DMA_MEM_TO_DEV; 598 espi->dma_tx_data.name = "ep93xx-spi-tx"; 599 600 espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, 601 &espi->dma_tx_data); 602 if (!espi->dma_tx) { 603 ret = -ENODEV; 604 goto fail_release_rx; 605 } 606 607 return 0; 608 609 fail_release_rx: 610 dma_release_channel(espi->dma_rx); 611 espi->dma_rx = NULL; 612 fail_free_page: 613 free_page((unsigned long)espi->zeropage); 614 615 return ret; 616 } 617 618 static void ep93xx_spi_release_dma(struct ep93xx_spi *espi) 619 { 620 if (espi->dma_rx) { 621 dma_release_channel(espi->dma_rx); 622 sg_free_table(&espi->rx_sgt); 623 } 624 if (espi->dma_tx) { 625 dma_release_channel(espi->dma_tx); 626 sg_free_table(&espi->tx_sgt); 627 } 628 629 if (espi->zeropage) 630 free_page((unsigned long)espi->zeropage); 631 } 632 633 static int ep93xx_spi_probe(struct platform_device *pdev) 634 { 635 struct spi_master *master; 636 struct ep93xx_spi_info *info; 637 struct ep93xx_spi *espi; 638 struct resource *res; 639 int irq; 640 int error; 641 int i; 642 643 info = dev_get_platdata(&pdev->dev); 644 if (!info) { 645 dev_err(&pdev->dev, "missing platform data\n"); 646 return -EINVAL; 647 } 648 649 irq = platform_get_irq(pdev, 0); 650 if (irq < 0) { 651 dev_err(&pdev->dev, "failed to get irq resources\n"); 652 return -EBUSY; 653 } 654 655 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 656 if (!res) { 657 dev_err(&pdev->dev, "unable to get iomem resource\n"); 658 return -ENODEV; 659 } 660 661 master = spi_alloc_master(&pdev->dev, sizeof(*espi)); 662 if (!master) 663 return -ENOMEM; 664 665 master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware; 666 master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware; 667 master->prepare_message = ep93xx_spi_prepare_message; 668 master->transfer_one = ep93xx_spi_transfer_one; 669 master->bus_num = pdev->id; 670 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 671 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); 672 673 master->num_chipselect = info->num_chipselect; 674 master->cs_gpios = devm_kzalloc(&master->dev, 675 sizeof(int) * master->num_chipselect, 676 GFP_KERNEL); 677 if (!master->cs_gpios) { 678 error = -ENOMEM; 679 goto fail_release_master; 680 } 681 682 for (i = 0; i < master->num_chipselect; i++) { 683 master->cs_gpios[i] = info->chipselect[i]; 684 685 if (!gpio_is_valid(master->cs_gpios[i])) 686 continue; 687 688 error = devm_gpio_request_one(&pdev->dev, master->cs_gpios[i], 689 GPIOF_OUT_INIT_HIGH, 690 "ep93xx-spi"); 691 if (error) { 692 dev_err(&pdev->dev, "could not request cs gpio %d\n", 693 master->cs_gpios[i]); 694 goto fail_release_master; 695 } 696 } 697 698 platform_set_drvdata(pdev, master); 699 700 espi = spi_master_get_devdata(master); 701 702 espi->clk = devm_clk_get(&pdev->dev, NULL); 703 if (IS_ERR(espi->clk)) { 704 dev_err(&pdev->dev, "unable to get spi clock\n"); 705 error = PTR_ERR(espi->clk); 706 goto fail_release_master; 707 } 708 709 /* 710 * Calculate maximum and minimum supported clock rates 711 * for the controller. 712 */ 713 master->max_speed_hz = clk_get_rate(espi->clk) / 2; 714 master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256); 715 716 espi->sspdr_phys = res->start + SSPDR; 717 718 espi->mmio = devm_ioremap_resource(&pdev->dev, res); 719 if (IS_ERR(espi->mmio)) { 720 error = PTR_ERR(espi->mmio); 721 goto fail_release_master; 722 } 723 724 error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt, 725 0, "ep93xx-spi", master); 726 if (error) { 727 dev_err(&pdev->dev, "failed to request irq\n"); 728 goto fail_release_master; 729 } 730 731 if (info->use_dma && ep93xx_spi_setup_dma(espi)) 732 dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n"); 733 734 /* make sure that the hardware is disabled */ 735 writel(0, espi->mmio + SSPCR1); 736 737 error = devm_spi_register_master(&pdev->dev, master); 738 if (error) { 739 dev_err(&pdev->dev, "failed to register SPI master\n"); 740 goto fail_free_dma; 741 } 742 743 dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", 744 (unsigned long)res->start, irq); 745 746 return 0; 747 748 fail_free_dma: 749 ep93xx_spi_release_dma(espi); 750 fail_release_master: 751 spi_master_put(master); 752 753 return error; 754 } 755 756 static int ep93xx_spi_remove(struct platform_device *pdev) 757 { 758 struct spi_master *master = platform_get_drvdata(pdev); 759 struct ep93xx_spi *espi = spi_master_get_devdata(master); 760 761 ep93xx_spi_release_dma(espi); 762 763 return 0; 764 } 765 766 static struct platform_driver ep93xx_spi_driver = { 767 .driver = { 768 .name = "ep93xx-spi", 769 }, 770 .probe = ep93xx_spi_probe, 771 .remove = ep93xx_spi_remove, 772 }; 773 module_platform_driver(ep93xx_spi_driver); 774 775 MODULE_DESCRIPTION("EP93xx SPI Controller driver"); 776 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); 777 MODULE_LICENSE("GPL"); 778 MODULE_ALIAS("platform:ep93xx-spi"); 779