/*
 * Freescale MXS SPI master driver
 *
 * Copyright 2012 DENX Software Engineering, GmbH.
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 *
 * Rework and transition to new API by:
 *   Marek Vasut <marex@denx.de>
 *
 * Based on previous attempt by:
 *   Fabio Estevam <fabio.estevam@freescale.com>
 *
 * Based on code from U-Boot bootloader by:
 *   Marek Vasut <marex@denx.de>
 *
 * Based on spi-stmp.c, which is:
 *   Author: Dmitry Pervushin <dimka@embeddedalley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Use 10S timeout for very long transfers, it should suffice.
*/ 57 #define SSP_TIMEOUT 10000 58 59 #define SG_MAXLEN 0xff00 60 61 struct mxs_spi { 62 struct mxs_ssp ssp; 63 struct completion c; 64 }; 65 66 static int mxs_spi_setup_transfer(struct spi_device *dev, 67 struct spi_transfer *t) 68 { 69 struct mxs_spi *spi = spi_master_get_devdata(dev->master); 70 struct mxs_ssp *ssp = &spi->ssp; 71 uint8_t bits_per_word; 72 uint32_t hz = 0; 73 74 bits_per_word = dev->bits_per_word; 75 if (t && t->bits_per_word) 76 bits_per_word = t->bits_per_word; 77 78 if (bits_per_word != 8) { 79 dev_err(&dev->dev, "%s, unsupported bits_per_word=%d\n", 80 __func__, bits_per_word); 81 return -EINVAL; 82 } 83 84 hz = dev->max_speed_hz; 85 if (t && t->speed_hz) 86 hz = min(hz, t->speed_hz); 87 if (hz == 0) { 88 dev_err(&dev->dev, "Cannot continue with zero clock\n"); 89 return -EINVAL; 90 } 91 92 mxs_ssp_set_clk_rate(ssp, hz); 93 94 writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) | 95 BF_SSP_CTRL1_WORD_LENGTH 96 (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) | 97 ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) | 98 ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0), 99 ssp->base + HW_SSP_CTRL1(ssp)); 100 101 writel(0x0, ssp->base + HW_SSP_CMD0); 102 writel(0x0, ssp->base + HW_SSP_CMD1); 103 104 return 0; 105 } 106 107 static int mxs_spi_setup(struct spi_device *dev) 108 { 109 int err = 0; 110 111 if (!dev->bits_per_word) 112 dev->bits_per_word = 8; 113 114 if (dev->mode & ~(SPI_CPOL | SPI_CPHA)) 115 return -EINVAL; 116 117 err = mxs_spi_setup_transfer(dev, NULL); 118 if (err) { 119 dev_err(&dev->dev, 120 "Failed to setup transfer, error = %d\n", err); 121 } 122 123 return err; 124 } 125 126 static uint32_t mxs_spi_cs_to_reg(unsigned cs) 127 { 128 uint32_t select = 0; 129 130 /* 131 * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0 132 * 133 * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ 134 * in HW_SSP_CTRL0 register do have multiple usage, please refer to 135 * the datasheet for further details. 
In SPI mode, they are used to 136 * toggle the chip-select lines (nCS pins). 137 */ 138 if (cs & 1) 139 select |= BM_SSP_CTRL0_WAIT_FOR_CMD; 140 if (cs & 2) 141 select |= BM_SSP_CTRL0_WAIT_FOR_IRQ; 142 143 return select; 144 } 145 146 static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs) 147 { 148 const uint32_t mask = 149 BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ; 150 uint32_t select; 151 struct mxs_ssp *ssp = &spi->ssp; 152 153 writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 154 select = mxs_spi_cs_to_reg(cs); 155 writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 156 } 157 158 static inline void mxs_spi_enable(struct mxs_spi *spi) 159 { 160 struct mxs_ssp *ssp = &spi->ssp; 161 162 writel(BM_SSP_CTRL0_LOCK_CS, 163 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 164 writel(BM_SSP_CTRL0_IGNORE_CRC, 165 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 166 } 167 168 static inline void mxs_spi_disable(struct mxs_spi *spi) 169 { 170 struct mxs_ssp *ssp = &spi->ssp; 171 172 writel(BM_SSP_CTRL0_LOCK_CS, 173 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 174 writel(BM_SSP_CTRL0_IGNORE_CRC, 175 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 176 } 177 178 static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set) 179 { 180 const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT); 181 struct mxs_ssp *ssp = &spi->ssp; 182 uint32_t reg; 183 184 do { 185 reg = readl_relaxed(ssp->base + offset); 186 187 if (!set) 188 reg = ~reg; 189 190 reg &= mask; 191 192 if (reg == mask) 193 return 0; 194 } while (time_before(jiffies, timeout)); 195 196 return -ETIMEDOUT; 197 } 198 199 static void mxs_ssp_dma_irq_callback(void *param) 200 { 201 struct mxs_spi *spi = param; 202 complete(&spi->c); 203 } 204 205 static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id) 206 { 207 struct mxs_ssp *ssp = dev_id; 208 dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n", 209 __func__, __LINE__, 210 readl(ssp->base + 
HW_SSP_CTRL1(ssp)), 211 readl(ssp->base + HW_SSP_STATUS(ssp))); 212 return IRQ_HANDLED; 213 } 214 215 static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs, 216 unsigned char *buf, int len, 217 int *first, int *last, int write) 218 { 219 struct mxs_ssp *ssp = &spi->ssp; 220 struct dma_async_tx_descriptor *desc = NULL; 221 const bool vmalloced_buf = is_vmalloc_addr(buf); 222 const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN; 223 const int sgs = DIV_ROUND_UP(len, desc_len); 224 int sg_count; 225 int min, ret; 226 uint32_t ctrl0; 227 struct page *vm_page; 228 void *sg_buf; 229 struct { 230 uint32_t pio[4]; 231 struct scatterlist sg; 232 } *dma_xfer; 233 234 if (!len) 235 return -EINVAL; 236 237 dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL); 238 if (!dma_xfer) 239 return -ENOMEM; 240 241 INIT_COMPLETION(spi->c); 242 243 ctrl0 = readl(ssp->base + HW_SSP_CTRL0); 244 ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs); 245 246 if (*first) 247 ctrl0 |= BM_SSP_CTRL0_LOCK_CS; 248 if (!write) 249 ctrl0 |= BM_SSP_CTRL0_READ; 250 251 /* Queue the DMA data transfer. */ 252 for (sg_count = 0; sg_count < sgs; sg_count++) { 253 min = min(len, desc_len); 254 255 /* Prepare the transfer descriptor. */ 256 if ((sg_count + 1 == sgs) && *last) 257 ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC; 258 259 if (ssp->devid == IMX23_SSP) 260 ctrl0 |= min; 261 262 dma_xfer[sg_count].pio[0] = ctrl0; 263 dma_xfer[sg_count].pio[3] = min; 264 265 if (vmalloced_buf) { 266 vm_page = vmalloc_to_page(buf); 267 if (!vm_page) { 268 ret = -ENOMEM; 269 goto err_vmalloc; 270 } 271 sg_buf = page_address(vm_page) + 272 ((size_t)buf & ~PAGE_MASK); 273 } else { 274 sg_buf = buf; 275 } 276 277 sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min); 278 ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1, 279 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 280 281 len -= min; 282 buf += min; 283 284 /* Queue the PIO register write transfer. 
*/ 285 desc = dmaengine_prep_slave_sg(ssp->dmach, 286 (struct scatterlist *)dma_xfer[sg_count].pio, 287 (ssp->devid == IMX23_SSP) ? 1 : 4, 288 DMA_TRANS_NONE, 289 sg_count ? DMA_PREP_INTERRUPT : 0); 290 if (!desc) { 291 dev_err(ssp->dev, 292 "Failed to get PIO reg. write descriptor.\n"); 293 ret = -EINVAL; 294 goto err_mapped; 295 } 296 297 desc = dmaengine_prep_slave_sg(ssp->dmach, 298 &dma_xfer[sg_count].sg, 1, 299 write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 300 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 301 302 if (!desc) { 303 dev_err(ssp->dev, 304 "Failed to get DMA data write descriptor.\n"); 305 ret = -EINVAL; 306 goto err_mapped; 307 } 308 } 309 310 /* 311 * The last descriptor must have this callback, 312 * to finish the DMA transaction. 313 */ 314 desc->callback = mxs_ssp_dma_irq_callback; 315 desc->callback_param = spi; 316 317 /* Start the transfer. */ 318 dmaengine_submit(desc); 319 dma_async_issue_pending(ssp->dmach); 320 321 ret = wait_for_completion_timeout(&spi->c, 322 msecs_to_jiffies(SSP_TIMEOUT)); 323 if (!ret) { 324 dev_err(ssp->dev, "DMA transfer timeout\n"); 325 ret = -ETIMEDOUT; 326 goto err_vmalloc; 327 } 328 329 ret = 0; 330 331 err_vmalloc: 332 while (--sg_count >= 0) { 333 err_mapped: 334 dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1, 335 write ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 336 } 337 338 kfree(dma_xfer); 339 340 return ret; 341 } 342 343 static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs, 344 unsigned char *buf, int len, 345 int *first, int *last, int write) 346 { 347 struct mxs_ssp *ssp = &spi->ssp; 348 349 if (*first) 350 mxs_spi_enable(spi); 351 352 mxs_spi_set_cs(spi, cs); 353 354 while (len--) { 355 if (*last && len == 0) 356 mxs_spi_disable(spi); 357 358 if (ssp->devid == IMX23_SSP) { 359 writel(BM_SSP_CTRL0_XFER_COUNT, 360 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 361 writel(1, 362 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 363 } else { 364 writel(1, ssp->base + HW_SSP_XFER_SIZE); 365 } 366 367 if (write) 368 writel(BM_SSP_CTRL0_READ, 369 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 370 else 371 writel(BM_SSP_CTRL0_READ, 372 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 373 374 writel(BM_SSP_CTRL0_RUN, 375 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 376 377 if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1)) 378 return -ETIMEDOUT; 379 380 if (write) 381 writel(*buf, ssp->base + HW_SSP_DATA(ssp)); 382 383 writel(BM_SSP_CTRL0_DATA_XFER, 384 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 385 386 if (!write) { 387 if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp), 388 BM_SSP_STATUS_FIFO_EMPTY, 0)) 389 return -ETIMEDOUT; 390 391 *buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff); 392 } 393 394 if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0)) 395 return -ETIMEDOUT; 396 397 buf++; 398 } 399 400 if (len <= 0) 401 return 0; 402 403 return -ETIMEDOUT; 404 } 405 406 static int mxs_spi_transfer_one(struct spi_master *master, 407 struct spi_message *m) 408 { 409 struct mxs_spi *spi = spi_master_get_devdata(master); 410 struct mxs_ssp *ssp = &spi->ssp; 411 int first, last; 412 struct spi_transfer *t, *tmp_t; 413 int status = 0; 414 int cs; 415 416 first = last = 0; 417 418 cs = m->spi->chip_select; 419 420 list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) 
{ 421 422 status = mxs_spi_setup_transfer(m->spi, t); 423 if (status) 424 break; 425 426 if (&t->transfer_list == m->transfers.next) 427 first = 1; 428 if (&t->transfer_list == m->transfers.prev) 429 last = 1; 430 if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) { 431 dev_err(ssp->dev, 432 "Cannot send and receive simultaneously\n"); 433 status = -EINVAL; 434 break; 435 } 436 437 /* 438 * Small blocks can be transfered via PIO. 439 * Measured by empiric means: 440 * 441 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1 442 * 443 * DMA only: 2.164808 seconds, 473.0KB/s 444 * Combined: 1.676276 seconds, 610.9KB/s 445 */ 446 if (t->len < 32) { 447 writel(BM_SSP_CTRL1_DMA_ENABLE, 448 ssp->base + HW_SSP_CTRL1(ssp) + 449 STMP_OFFSET_REG_CLR); 450 451 if (t->tx_buf) 452 status = mxs_spi_txrx_pio(spi, cs, 453 (void *)t->tx_buf, 454 t->len, &first, &last, 1); 455 if (t->rx_buf) 456 status = mxs_spi_txrx_pio(spi, cs, 457 t->rx_buf, t->len, 458 &first, &last, 0); 459 } else { 460 writel(BM_SSP_CTRL1_DMA_ENABLE, 461 ssp->base + HW_SSP_CTRL1(ssp) + 462 STMP_OFFSET_REG_SET); 463 464 if (t->tx_buf) 465 status = mxs_spi_txrx_dma(spi, cs, 466 (void *)t->tx_buf, t->len, 467 &first, &last, 1); 468 if (t->rx_buf) 469 status = mxs_spi_txrx_dma(spi, cs, 470 t->rx_buf, t->len, 471 &first, &last, 0); 472 } 473 474 if (status) { 475 stmp_reset_block(ssp->base); 476 break; 477 } 478 479 m->actual_length += t->len; 480 first = last = 0; 481 } 482 483 m->status = 0; 484 spi_finalize_current_message(master); 485 486 return status; 487 } 488 489 static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param) 490 { 491 struct mxs_ssp *ssp = param; 492 493 if (!mxs_dma_is_apbh(chan)) 494 return false; 495 496 if (chan->chan_id != ssp->dma_channel) 497 return false; 498 499 chan->private = &ssp->dma_data; 500 501 return true; 502 } 503 504 static const struct of_device_id mxs_spi_dt_ids[] = { 505 { .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, }, 506 { .compatible = 
"fsl,imx28-spi", .data = (void *) IMX28_SSP, }, 507 { /* sentinel */ } 508 }; 509 MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids); 510 511 static int __devinit mxs_spi_probe(struct platform_device *pdev) 512 { 513 const struct of_device_id *of_id = 514 of_match_device(mxs_spi_dt_ids, &pdev->dev); 515 struct device_node *np = pdev->dev.of_node; 516 struct spi_master *master; 517 struct mxs_spi *spi; 518 struct mxs_ssp *ssp; 519 struct resource *iores, *dmares; 520 struct pinctrl *pinctrl; 521 struct clk *clk; 522 void __iomem *base; 523 int devid, dma_channel, clk_freq; 524 int ret = 0, irq_err, irq_dma; 525 dma_cap_mask_t mask; 526 527 /* 528 * Default clock speed for the SPI core. 160MHz seems to 529 * work reasonably well with most SPI flashes, so use this 530 * as a default. Override with "clock-frequency" DT prop. 531 */ 532 const int clk_freq_default = 160000000; 533 534 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 535 irq_err = platform_get_irq(pdev, 0); 536 irq_dma = platform_get_irq(pdev, 1); 537 if (!iores || irq_err < 0 || irq_dma < 0) 538 return -EINVAL; 539 540 base = devm_request_and_ioremap(&pdev->dev, iores); 541 if (!base) 542 return -EADDRNOTAVAIL; 543 544 pinctrl = devm_pinctrl_get_select_default(&pdev->dev); 545 if (IS_ERR(pinctrl)) 546 return PTR_ERR(pinctrl); 547 548 clk = devm_clk_get(&pdev->dev, NULL); 549 if (IS_ERR(clk)) 550 return PTR_ERR(clk); 551 552 if (np) { 553 devid = (enum mxs_ssp_id) of_id->data; 554 /* 555 * TODO: This is a temporary solution and should be changed 556 * to use generic DMA binding later when the helpers get in. 
557 */ 558 ret = of_property_read_u32(np, "fsl,ssp-dma-channel", 559 &dma_channel); 560 if (ret) { 561 dev_err(&pdev->dev, 562 "Failed to get DMA channel\n"); 563 return -EINVAL; 564 } 565 566 ret = of_property_read_u32(np, "clock-frequency", 567 &clk_freq); 568 if (ret) 569 clk_freq = clk_freq_default; 570 } else { 571 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); 572 if (!dmares) 573 return -EINVAL; 574 devid = pdev->id_entry->driver_data; 575 dma_channel = dmares->start; 576 clk_freq = clk_freq_default; 577 } 578 579 master = spi_alloc_master(&pdev->dev, sizeof(*spi)); 580 if (!master) 581 return -ENOMEM; 582 583 master->transfer_one_message = mxs_spi_transfer_one; 584 master->setup = mxs_spi_setup; 585 master->mode_bits = SPI_CPOL | SPI_CPHA; 586 master->num_chipselect = 3; 587 master->dev.of_node = np; 588 master->flags = SPI_MASTER_HALF_DUPLEX; 589 590 spi = spi_master_get_devdata(master); 591 ssp = &spi->ssp; 592 ssp->dev = &pdev->dev; 593 ssp->clk = clk; 594 ssp->base = base; 595 ssp->devid = devid; 596 ssp->dma_channel = dma_channel; 597 598 init_completion(&spi->c); 599 600 ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0, 601 DRIVER_NAME, ssp); 602 if (ret) 603 goto out_master_free; 604 605 dma_cap_zero(mask); 606 dma_cap_set(DMA_SLAVE, mask); 607 ssp->dma_data.chan_irq = irq_dma; 608 ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp); 609 if (!ssp->dmach) { 610 dev_err(ssp->dev, "Failed to request DMA\n"); 611 goto out_master_free; 612 } 613 614 clk_prepare_enable(ssp->clk); 615 clk_set_rate(ssp->clk, clk_freq); 616 ssp->clk_rate = clk_get_rate(ssp->clk) / 1000; 617 618 stmp_reset_block(ssp->base); 619 620 platform_set_drvdata(pdev, master); 621 622 ret = spi_register_master(master); 623 if (ret) { 624 dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret); 625 goto out_free_dma; 626 } 627 628 return 0; 629 630 out_free_dma: 631 dma_release_channel(ssp->dmach); 632 clk_disable_unprepare(ssp->clk); 633 
out_master_free: 634 spi_master_put(master); 635 return ret; 636 } 637 638 static int __devexit mxs_spi_remove(struct platform_device *pdev) 639 { 640 struct spi_master *master; 641 struct mxs_spi *spi; 642 struct mxs_ssp *ssp; 643 644 master = spi_master_get(platform_get_drvdata(pdev)); 645 spi = spi_master_get_devdata(master); 646 ssp = &spi->ssp; 647 648 spi_unregister_master(master); 649 650 dma_release_channel(ssp->dmach); 651 652 clk_disable_unprepare(ssp->clk); 653 654 spi_master_put(master); 655 656 return 0; 657 } 658 659 static struct platform_driver mxs_spi_driver = { 660 .probe = mxs_spi_probe, 661 .remove = __devexit_p(mxs_spi_remove), 662 .driver = { 663 .name = DRIVER_NAME, 664 .owner = THIS_MODULE, 665 .of_match_table = mxs_spi_dt_ids, 666 }, 667 }; 668 669 module_platform_driver(mxs_spi_driver); 670 671 MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); 672 MODULE_DESCRIPTION("MXS SPI master driver"); 673 MODULE_LICENSE("GPL"); 674 MODULE_ALIAS("platform:mxs-spi"); 675