/*
 * Freescale MXS SPI master driver
 *
 * Copyright 2012 DENX Software Engineering, GmbH.
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2008 Embedded Alley Solutions, Inc. All Rights Reserved.
 *
 * Rework and transition to new API by:
 * Marek Vasut <marex@denx.de>
 *
 * Based on previous attempt by:
 * Fabio Estevam <fabio.estevam@freescale.com>
 *
 * Based on code from U-Boot bootloader by:
 * Marek Vasut <marex@denx.de>
 *
 * Based on spi-stmp.c, which is:
 * Author: Dmitry Pervushin <dimka@embeddedalley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Use a 10s timeout for very long transfers; it should suffice. */
#define SSP_TIMEOUT		10000

#define SG_MAXLEN		0xff00

struct mxs_spi {
	struct mxs_ssp		ssp;
	struct completion	c;
};

static int mxs_spi_setup_transfer(struct spi_device *dev,
				  struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	uint32_t hz = 0;

	hz = dev->max_speed_hz;
	if (t && t->speed_hz)
		hz = min(hz, t->speed_hz);
	if (hz == 0) {
		dev_err(&dev->dev, "Cannot continue with zero clock\n");
		return -EINVAL;
	}

	mxs_ssp_set_clk_rate(ssp, hz);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
	       BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
	       ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
	       ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
	       ssp->base + HW_SSP_CTRL1(ssp));

	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}

static int mxs_spi_setup(struct spi_device *dev)
{
	int err = 0;

	if (!dev->bits_per_word)
		dev->bits_per_word = 8;

	if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
		return -EINVAL;

	err = mxs_spi_setup_transfer(dev, NULL);
	if (err)
		dev_err(&dev->dev,
			"Failed to setup transfer, error = %d\n", err);

	return err;
}

static uint32_t mxs_spi_cs_to_reg(unsigned cs)
{
	uint32_t select = 0;

	/*
	 * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
	 *
	 * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
	 * in the HW_SSP_CTRL0 register have multiple uses; please refer to
	 * the datasheet for further details.
	 * In SPI mode, they are used to toggle the chip-select lines
	 * (nCS pins).
	 */
	if (cs & 1)
		select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
	if (cs & 2)
		select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;

	return select;
}

static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
{
	const uint32_t mask =
		BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
	uint32_t select;
	struct mxs_ssp *ssp = &spi->ssp;

	writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	select = mxs_spi_cs_to_reg(cs);
	writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

static inline void mxs_spi_enable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
}

static inline void mxs_spi_disable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

/*
 * Poll the register at @offset until all bits in @mask are set (@set == true)
 * or cleared (@set == false), or until SSP_TIMEOUT expires.
 */
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
	struct mxs_ssp *ssp = &spi->ssp;
	uint32_t reg;

	do {
		reg = readl_relaxed(ssp->base + offset);

		if (!set)
			reg = ~reg;

		reg &= mask;

		if (reg == mask)
			return 0;
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;

	complete(&spi->c);
}

static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;

	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));

	return IRQ_HANDLED;
}

static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	uint32_t ctrl0;
	struct page *vm_page;
	void *sg_buf;
	struct {
		uint32_t		pio[4];
		struct scatterlist	sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	INIT_COMPLETION(spi->c);

	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);

	if (*first)
		ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
	if (!write)
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue the DMA data transfer. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		min = min(len, desc_len);

		/* Prepare the transfer descriptor. */
		if ((sg_count + 1 == sgs) && *last)
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		if (ssp->devid == IMX23_SSP) {
			ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
			ctrl0 |= min;
		}

		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}
			sg_buf = page_address(vm_page) +
				 ((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
				 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}

		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	ret = wait_for_completion_timeout(&spi->c,
					  msecs_to_jiffies(SSP_TIMEOUT));
	if (!ret) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		dmaengine_terminate_all(ssp->dmach);
		goto err_vmalloc;
	}

	ret = 0;

err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			     write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}

static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;

	if (*first)
		mxs_spi_enable(spi);

	mxs_spi_set_cs(spi, cs);

	while (len--) {
		if (*last && len == 0)
			mxs_spi_disable(spi);

		if (ssp->devid == IMX23_SSP) {
			writel(BM_SSP_CTRL0_XFER_COUNT,
			       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
			writel(1,
			       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		} else {
			writel(1, ssp->base + HW_SSP_XFER_SIZE);
		}

		if (write)
			writel(BM_SSP_CTRL0_READ,
			       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		else
			writel(BM_SSP_CTRL0_READ,
			       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		writel(BM_SSP_CTRL0_RUN,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
			return -ETIMEDOUT;

		if (write)
			writel(*buf, ssp->base + HW_SSP_DATA(ssp));

		writel(BM_SSP_CTRL0_DATA_XFER,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (!write) {
			if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
					 BM_SSP_STATUS_FIFO_EMPTY, 0))
				return -ETIMEDOUT;

			*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
		}

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
			return -ETIMEDOUT;

		buf++;
	}

	if (len <= 0)
		return 0;

	return -ETIMEDOUT;
}

static int mxs_spi_transfer_one(struct spi_master *master,
				struct spi_message *m)
{
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	int first, last;
	struct spi_transfer *t, *tmp_t;
	int status = 0;
	int cs;

	first = last = 0;

	cs = m->spi->chip_select;

	list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
		status = mxs_spi_setup_transfer(m->spi, t);
		if (status)
			break;

		if (&t->transfer_list == m->transfers.next)
			first = 1;
		if (&t->transfer_list == m->transfers.prev)
			last = 1;
		if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
			dev_err(ssp->dev,
				"Cannot send and receive simultaneously\n");
			status = -EINVAL;
			break;
		}

		/*
		 * Small blocks can be transferred via PIO.
		 * Measured empirically:
		 *
		 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
		 *
		 * DMA only: 2.164808 seconds, 473.0KB/s
		 * Combined: 1.676276 seconds, 610.9KB/s
		 */
		if (t->len < 32) {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
			       ssp->base + HW_SSP_CTRL1(ssp) +
			       STMP_OFFSET_REG_CLR);

			if (t->tx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						(void *)t->tx_buf,
						t->len, &first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		} else {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
			       ssp->base + HW_SSP_CTRL1(ssp) +
			       STMP_OFFSET_REG_SET);

			if (t->tx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						(void *)t->tx_buf, t->len,
						&first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		}

		if (status) {
			stmp_reset_block(ssp->base);
			break;
		}

		m->actual_length += t->len;
		first = last = 0;
	}

	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);

static int mxs_spi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_spi_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;
	struct resource *iores;
	struct clk *clk;
	void __iomem *base;
	int devid, clk_freq;
	int ret = 0, irq_err;

	/*
	 * Default clock speed for the SPI core. 160MHz seems to
	 * work reasonably well with most SPI flashes, so use this
	 * as a default. Override with the "clock-frequency" DT property.
506 */ 507 const int clk_freq_default = 160000000; 508 509 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 510 irq_err = platform_get_irq(pdev, 0); 511 if (irq_err < 0) 512 return -EINVAL; 513 514 base = devm_ioremap_resource(&pdev->dev, iores); 515 if (IS_ERR(base)) 516 return PTR_ERR(base); 517 518 clk = devm_clk_get(&pdev->dev, NULL); 519 if (IS_ERR(clk)) 520 return PTR_ERR(clk); 521 522 devid = (enum mxs_ssp_id) of_id->data; 523 ret = of_property_read_u32(np, "clock-frequency", 524 &clk_freq); 525 if (ret) 526 clk_freq = clk_freq_default; 527 528 master = spi_alloc_master(&pdev->dev, sizeof(*spi)); 529 if (!master) 530 return -ENOMEM; 531 532 master->transfer_one_message = mxs_spi_transfer_one; 533 master->setup = mxs_spi_setup; 534 master->bits_per_word_mask = SPI_BPW_MASK(8); 535 master->mode_bits = SPI_CPOL | SPI_CPHA; 536 master->num_chipselect = 3; 537 master->dev.of_node = np; 538 master->flags = SPI_MASTER_HALF_DUPLEX; 539 540 spi = spi_master_get_devdata(master); 541 ssp = &spi->ssp; 542 ssp->dev = &pdev->dev; 543 ssp->clk = clk; 544 ssp->base = base; 545 ssp->devid = devid; 546 547 init_completion(&spi->c); 548 549 ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0, 550 DRIVER_NAME, ssp); 551 if (ret) 552 goto out_master_free; 553 554 ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); 555 if (!ssp->dmach) { 556 dev_err(ssp->dev, "Failed to request DMA\n"); 557 ret = -ENODEV; 558 goto out_master_free; 559 } 560 561 ret = clk_prepare_enable(ssp->clk); 562 if (ret) 563 goto out_dma_release; 564 565 clk_set_rate(ssp->clk, clk_freq); 566 ssp->clk_rate = clk_get_rate(ssp->clk) / 1000; 567 568 ret = stmp_reset_block(ssp->base); 569 if (ret) 570 goto out_disable_clk; 571 572 platform_set_drvdata(pdev, master); 573 574 ret = spi_register_master(master); 575 if (ret) { 576 dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret); 577 goto out_disable_clk; 578 } 579 580 return 0; 581 582 out_disable_clk: 583 clk_disable_unprepare(ssp->clk); 584 out_dma_release: 585 dma_release_channel(ssp->dmach); 586 out_master_free: 587 spi_master_put(master); 588 return ret; 589 } 590 591 static int mxs_spi_remove(struct platform_device *pdev) 592 { 593 struct spi_master *master; 594 struct mxs_spi *spi; 595 struct mxs_ssp *ssp; 596 597 master = spi_master_get(platform_get_drvdata(pdev)); 598 spi = spi_master_get_devdata(master); 599 ssp = &spi->ssp; 600 601 spi_unregister_master(master); 602 clk_disable_unprepare(ssp->clk); 603 dma_release_channel(ssp->dmach); 604 spi_master_put(master); 605 606 return 0; 607 } 608 609 static struct platform_driver mxs_spi_driver = { 610 .probe = mxs_spi_probe, 611 .remove = mxs_spi_remove, 612 .driver = { 613 .name = DRIVER_NAME, 614 .owner = THIS_MODULE, 615 .of_match_table = mxs_spi_dt_ids, 616 }, 617 }; 618 619 module_platform_driver(mxs_spi_driver); 620 621 MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); 622 MODULE_DESCRIPTION("MXS SPI master driver"); 623 MODULE_LICENSE("GPL"); 624 MODULE_ALIAS("platform:mxs-spi"); 625