/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/of.h>

#include <linux/io.h>
#include <linux/gpio.h>

/* SPI register offsets */
#define SPI_CR			0x0000
#define SPI_MR			0x0004
#define SPI_RDR			0x0008
#define SPI_TDR			0x000c
#define SPI_SR			0x0010
#define SPI_IER			0x0014
#define SPI_IDR			0x0018
#define SPI_IMR			0x001c
#define SPI_CSR0		0x0030
#define SPI_CSR1		0x0034
#define SPI_CSR2		0x0038
#define SPI_CSR3		0x003c
#define SPI_VERSION		0x00fc
#define SPI_RPR			0x0100
#define SPI_RCR			0x0104
#define SPI_TPR			0x0108
#define SPI_TCR			0x010c
#define SPI_RNPR		0x0110
#define SPI_RNCR		0x0114
#define SPI_TNPR		0x0118
#define SPI_TNCR		0x011c
#define SPI_PTCR		0x0120
#define SPI_PTSR		0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET	0
#define SPI_SPIEN_SIZE		1
#define SPI_SPIDIS_OFFSET	1
#define SPI_SPIDIS_SIZE		1
#define SPI_SWRST_OFFSET	7
#define SPI_SWRST_SIZE		1
#define SPI_LASTXFER_OFFSET	24
#define SPI_LASTXFER_SIZE	1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET		0
#define SPI_MSTR_SIZE		1
#define SPI_PS_OFFSET		1
#define SPI_PS_SIZE		1
#define SPI_PCSDEC_OFFSET	2
#define SPI_PCSDEC_SIZE		1
#define SPI_FDIV_OFFSET		3
#define SPI_FDIV_SIZE		1
#define SPI_MODFDIS_OFFSET	4
#define SPI_MODFDIS_SIZE	1
#define SPI_WDRBT_OFFSET	5
#define SPI_WDRBT_SIZE		1
#define SPI_LLB_OFFSET		7
#define SPI_LLB_SIZE		1
#define SPI_PCS_OFFSET		16
#define SPI_PCS_SIZE		4
#define SPI_DLYBCS_OFFSET	24
#define SPI_DLYBCS_SIZE		8

/* Bitfields in RDR */
#define SPI_RD_OFFSET		0
#define SPI_RD_SIZE		16

/* Bitfields in TDR */
#define SPI_TD_OFFSET		0
#define SPI_TD_SIZE		16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET		0
#define SPI_RDRF_SIZE		1
#define SPI_TDRE_OFFSET		1
#define SPI_TDRE_SIZE		1
#define SPI_MODF_OFFSET		2
#define SPI_MODF_SIZE		1
#define SPI_OVRES_OFFSET	3
#define SPI_OVRES_SIZE		1
#define SPI_ENDRX_OFFSET	4
#define SPI_ENDRX_SIZE		1
#define SPI_ENDTX_OFFSET	5
#define SPI_ENDTX_SIZE		1
#define SPI_RXBUFF_OFFSET	6
#define SPI_RXBUFF_SIZE		1
#define SPI_TXBUFE_OFFSET	7
#define SPI_TXBUFE_SIZE		1
#define SPI_NSSR_OFFSET		8
#define SPI_NSSR_SIZE		1
#define SPI_TXEMPTY_OFFSET	9
#define SPI_TXEMPTY_SIZE	1
#define SPI_SPIENS_OFFSET	16
#define SPI_SPIENS_SIZE		1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET		0
#define SPI_CPOL_SIZE		1
#define SPI_NCPHA_OFFSET	1
#define SPI_NCPHA_SIZE		1
#define SPI_CSAAT_OFFSET	3
#define SPI_CSAAT_SIZE		1
#define SPI_BITS_OFFSET		4
#define SPI_BITS_SIZE		4
#define SPI_SCBR_OFFSET		8
#define SPI_SCBR_SIZE		8
#define SPI_DLYBS_OFFSET	16
#define SPI_DLYBS_SIZE		8
#define SPI_DLYBCT_OFFSET	24
#define SPI_DLYBCT_SIZE		8
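/*
 * Worked example (illustrative): using the SPI_BF()/SPI_BIT() helpers
 * defined below, a CSR value for SPI mode 0 (CPOL=0, CPHA=0) at 8 bits
 * per word with a clock divider of 8 would be composed as
 *
 *	SPI_BF(SCBR, 8) | SPI_BF(BITS, SPI_BITS_8_BPT) | SPI_BIT(NCPHA)
 *
 * (NCPHA is the inverse of the usual CPHA sense, so mode 0 sets it).
 */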
/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET	0
#define SPI_RXCTR_SIZE		16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET	0
#define SPI_TXCTR_SIZE		16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET	0
#define SPI_RXNCR_SIZE		16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET	0
#define SPI_TXNCR_SIZE		16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET	0
#define SPI_RXTEN_SIZE		1
#define SPI_RXTDIS_OFFSET	1
#define SPI_RXTDIS_SIZE		1
#define SPI_TXTEN_OFFSET	8
#define SPI_TXTEN_SIZE		1
#define SPI_TXTDIS_OFFSET	9
#define SPI_TXTDIS_SIZE		1

/* Constants for BITS */
#define SPI_BITS_8_BPT		0
#define SPI_BITS_9_BPT		1
#define SPI_BITS_10_BPT		2
#define SPI_BITS_11_BPT		3
#define SPI_BITS_12_BPT		4
#define SPI_BITS_13_BPT		5
#define SPI_BITS_14_BPT		6
#define SPI_BITS_15_BPT		7
#define SPI_BITS_16_BPT		8

/* Bit manipulation macros */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
#define SPI_BF(name, value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name, value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name, value, old) \
	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	 | SPI_BF(name, value))

/* Register access macros */
#define spi_readl(port, reg) \
	__raw_readl((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
	__raw_writel((value), (port)->regs + SPI_##reg)

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES	16

struct atmel_spi_dma {
	struct dma_chan			*chan_rx;
	struct dma_chan			*chan_tx;
	struct scatterlist		sgrx;
	struct scatterlist		sgtx;
	struct dma_async_tx_descriptor	*data_desc_rx;
	struct dma_async_tx_descriptor	*data_desc_tx;

	struct at_dma_slave	dma_slave;
};

struct atmel_spi_caps {
	bool	is_spi2;
	bool	has_wdrbt;
	bool	has_dma_support;
};
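/*
 * These capability flags are derived from the hardware VERSION register
 * at probe time; see atmel_get_caps() below.
 */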
/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t		lock;
	unsigned long		flags;

	phys_addr_t		phybase;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;
	struct spi_device	*stay;

	u8			stopping;
	struct list_head	queue;
	struct tasklet_struct	tasklet;
	struct spi_transfer	*current_transfer;
	unsigned long		current_remaining_bytes;
	struct spi_transfer	*next_transfer;
	unsigned long		next_remaining_bytes;
	int			done_status;

	/* scratch buffer */
	void			*buffer;
	dma_addr_t		buffer_dma;

	struct atmel_spi_caps	caps;

	bool			use_dma;
	bool			use_pdc;
	/* dmaengine data */
	struct atmel_spi_dma	dma;
};

/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;
	u32			csr;
};

#define BUFFER_SIZE		PAGE_SIZE
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Version 2 of the SPI controller has
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SCBR allows faster clocking
 */
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
	return as->caps.is_spi2;
}

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */
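/*
 * An illustrative board entry (hypothetical pin macro) passing the nCS
 * GPIO number through controller_data, as atmel_spi_setup() expects:
 *
 *	static struct spi_board_info board_spi_devices[] = {
 *		{
 *			.modalias	 = "mtd_dataflash",
 *			.chip_select	 = 1,
 *			.max_speed_hz	 = 15 * 1000 * 1000,
 *			.bus_num	 = 0,
 *			.controller_data = (void *)SOME_NCS1_GPIO,
 *		},
 *	};
 *
 * On device-tree platforms the SPI core fills spi->cs_gpio from the
 * standard "cs-gpios" property instead.
 */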
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	if (atmel_spi_is_v2(as)) {
		spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
		/* For the low SPI version, there is an issue that PDC transfer
		 * on CS1,2,3 needs SPI_CSR0.BITS config as SPI_CSR1,2,3.BITS
		 */
		spi_writel(as, CSR0, asd->csr);
		if (as->caps.has_wdrbt) {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(WDRBT)
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		} else {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		}

		mr = spi_readl(as, MR);
		gpio_set_value(asd->npcs_pin, active);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
		if (spi->chip_select != 0)
			gpio_set_value(asd->npcs_pin, active);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (high)" : "",
			mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (low)" : "",
			mr);

	if (atmel_spi_is_v2(as) || spi->chip_select != 0)
		gpio_set_value(asd->npcs_pin, !active);
}

static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
	spin_lock_irqsave(&as->lock, as->flags);
}

static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}

static inline bool atmel_spi_use_dma(struct atmel_spi *as,
				struct spi_transfer *xfer)
{
	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}

static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
					struct spi_transfer *xfer)
{
	return msg->transfers.prev == &xfer->transfer_list;
}

static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
{
	return xfer->delay_usecs == 0 && !xfer->cs_change;
}

static int atmel_spi_dma_slave_config(struct atmel_spi *as,
				struct dma_slave_config *slave_config,
				u8 bits_per_word)
{
	int err = 0;

	if (bits_per_word > 8) {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
	slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
	slave_config->src_maxburst = 1;
	slave_config->dst_maxburst = 1;
	slave_config->device_fc = false;

	slave_config->direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
	}

	slave_config->direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
	}

	return err;
}

static bool filter(struct dma_chan *chan, void *pdata)
{
	struct atmel_spi_dma *sl_pdata = pdata;
	struct at_dma_slave *sl;

	if (!sl_pdata)
		return false;

	sl = &sl_pdata->dma_slave;
	if (sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}
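/*
 * Request and configure the dmaengine channels used for transfers of
 * at least DMA_MIN_BYTES.  On failure the caller falls back to
 * PIO-only operation.
 */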
static int atmel_spi_configure_dma(struct atmel_spi *as)
{
	struct dma_slave_config slave_config;
	struct device *dev = &as->pdev->dev;
	int err;

	dma_cap_mask_t mask;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	as->dma.chan_tx = dma_request_slave_channel_compat(mask, filter,
							   &as->dma,
							   dev, "tx");
	if (!as->dma.chan_tx) {
		dev_err(dev,
			"DMA TX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	as->dma.chan_rx = dma_request_slave_channel_compat(mask, filter,
							   &as->dma,
							   dev, "rx");

	if (!as->dma.chan_rx) {
		dev_err(dev,
			"DMA RX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	err = atmel_spi_dma_slave_config(as, &slave_config, 8);
	if (err)
		goto error;

	dev_info(&as->pdev->dev,
			"Using %s (tx) and %s (rx) for DMA transfers\n",
			dma_chan_name(as->dma.chan_tx),
			dma_chan_name(as->dma.chan_rx));
	return 0;
error:
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dma_release_channel(as->dma.chan_tx);
	return err;
}

static void atmel_spi_stop_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		as->dma.chan_rx->device->device_control(as->dma.chan_rx,
							DMA_TERMINATE_ALL, 0);
	if (as->dma.chan_tx)
		as->dma.chan_tx->device->device_control(as->dma.chan_tx,
							DMA_TERMINATE_ALL, 0);
}

static void atmel_spi_release_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dma_release_channel(as->dma.chan_tx);
}

/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
	struct spi_master	*master = data;
	struct atmel_spi	*as = spi_master_get_devdata(master);

	/* trigger SPI tasklet */
	tasklet_schedule(&as->tasklet);
}

/*
 * Next transfer using PIO.
 * lock is held, spi tasklet is blocked
 */
static void atmel_spi_next_xfer_pio(struct spi_master *master,
				struct spi_transfer *xfer)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");

	as->current_remaining_bytes = xfer->len;

	/* Make sure data is not remaining in RDR */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	if (xfer->tx_buf) {
		if (xfer->bits_per_word > 8)
			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
		else
			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
	} else {
		spi_writel(as, TDR, 0);
	}

	dev_dbg(master->dev.parent,
		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}
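/*
 * Note that only the first word is primed above; the remaining words
 * are pushed one at a time from the RDRF interrupt path by
 * atmel_spi_pump_pio_data().
 */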
/*
 * Submit next transfer for DMA.
 * lock is held, spi tasklet is blocked
 */
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
				struct spi_transfer *xfer,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct dma_chan		*rxchan = as->dma.chan_rx;
	struct dma_chan		*txchan = as->dma.chan_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	struct dma_slave_config	slave_config;
	dma_cookie_t		cookie;
	u32			len = *plen;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/* release lock for DMA operations */
	atmel_spi_unlock(as);

	/* prepare the RX dma transfer */
	sg_init_table(&as->dma.sgrx, 1);
	if (xfer->rx_buf) {
		as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
	} else {
		as->dma.sgrx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	/* prepare the TX dma transfer */
	sg_init_table(&as->dma.sgtx, 1);
	if (xfer->tx_buf) {
		as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
	} else {
		as->dma.sgtx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
	}

	sg_dma_len(&as->dma.sgtx) = len;
	sg_dma_len(&as->dma.sgrx) = len;

	*plen = len;

	if (atmel_spi_dma_slave_config(as, &slave_config, 8))
		goto err_exit;

	/* Send both scatterlists */
	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
					&as->dma.sgrx,
					1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
					NULL);
	if (!rxdesc)
		goto err_dma;

	txdesc = txchan->device->device_prep_slave_sg(txchan,
					&as->dma.sgtx,
					1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
					NULL);
	if (!txdesc)
		goto err_dma;

	dev_dbg(master->dev.parent,
		"  start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
		xfer->rx_buf, (unsigned long long)xfer->rx_dma);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(OVRES));

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = master;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	/* take back lock */
	atmel_spi_lock(as);
	return 0;

err_dma:
	spi_writel(as, IDR, SPI_BIT(OVRES));
	atmel_spi_stop_dma(as);
err_exit:
	atmel_spi_lock(as);
	return -ENOMEM;
}
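/*
 * Compute the DMA addresses and clamped length for the next chunk of a
 * PDC transfer, substituting the coherent scratch buffer when rx_buf
 * or tx_buf is not supplied.
 */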
static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		*rx_dma = xfer->rx_dma + xfer->len - *plen;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - *plen;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}

/*
 * Submit next transfer for PDC.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_transfer	*xfer;
	u32			len, remaining;
	u32			ieval;
	dma_addr_t		tx_dma, rx_dma;

	if (!as->current_transfer)
		xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
	else if (!as->next_transfer)
		xfer = list_entry(as->current_transfer->transfer_list.next,
				struct spi_transfer, transfer_list);
	else
		xfer = NULL;

	if (xfer) {
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

		len = xfer->len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		remaining = xfer->len - len;

		spi_writel(as, RPR, rx_dma);
		spi_writel(as, TPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RCR, len);
		spi_writel(as, TCR, len);

		dev_dbg(&msg->spi->dev,
			"  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
			xfer, xfer->len, xfer->tx_buf,
			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
			(unsigned long long)xfer->rx_dma);
	} else {
		xfer = as->next_transfer;
		remaining = as->next_remaining_bytes;
	}

	as->current_transfer = xfer;
	as->current_remaining_bytes = remaining;

	if (remaining > 0)
		len = remaining;
	else if (!atmel_spi_xfer_is_last(msg, xfer)
			&& atmel_spi_xfer_can_be_chained(xfer)) {
		xfer = list_entry(xfer->transfer_list.next,
				struct spi_transfer, transfer_list);
		len = xfer->len;
	} else
		xfer = NULL;

	as->next_transfer = xfer;

	if (xfer) {
		u32	total;

		total = len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->next_remaining_bytes = total - len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
			xfer, xfer->len, xfer->tx_buf,
			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
			(unsigned long long)xfer->rx_dma);
		ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	} else {
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	}

	/* REVISIT: We're waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for ENDTX in one transfer and
	 * then start waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, ieval);
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
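/*
 * The PDC's "next" pointer/counter registers (RNPR/RNCR, TNPR/TNCR)
 * let the function above queue a second buffer while the first one
 * drains, so chainable transfers proceed back to back.
 */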
/*
 * Choose way to submit next transfer and start it.
 * lock is held, spi tasklet is blocked
 */
static void atmel_spi_dma_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_transfer	*xfer;
	u32			remaining, len;

	remaining = as->current_remaining_bytes;
	if (remaining) {
		xfer = as->current_transfer;
		len = remaining;
	} else {
		if (!as->current_transfer)
			xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
		else
			xfer = list_entry(
				as->current_transfer->transfer_list.next,
					struct spi_transfer, transfer_list);

		as->current_transfer = xfer;
		len = xfer->len;
	}

	if (atmel_spi_use_dma(as, xfer)) {
		u32 total = len;
		if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) {
			as->current_remaining_bytes = total - len;
			return;
		} else {
			dev_err(&msg->spi->dev,
				"unable to use DMA, fallback to PIO\n");
		}
	}

	/* fall back to PIO if an error happened while using DMA */
	atmel_spi_next_xfer_pio(master, xfer);
}
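/*
 * Start the message at the head of the queue: activate its chipselect
 * unless the previous message left it active, then submit the first
 * transfer.
 * lock is held, spi irq or tasklet is blocked
 */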
static void atmel_spi_next_message(struct spi_master *master)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_device	*spi;

	BUG_ON(as->current_transfer);

	msg = list_entry(as->queue.next, struct spi_message, queue);
	spi = msg->spi;

	dev_dbg(master->dev.parent, "start message %p for %s\n",
			msg, dev_name(&spi->dev));

	/* select chip if it's not still active */
	if (as->stay) {
		if (as->stay != spi) {
			cs_deactivate(as, as->stay);
			cs_activate(as, spi);
		}
		as->stay = NULL;
	} else
		cs_activate(as, spi);

	if (as->use_pdc)
		atmel_spi_pdc_next_xfer(master, msg);
	else
		atmel_spi_dma_next_xfer(master, msg);
}

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device	*dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for the dma
		 * mapping */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev,
				nonconst_tx, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						xfer->tx_dma, xfer->len,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}

static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
		struct spi_message *msg, int stay)
{
	if (!stay || as->done_status < 0)
		cs_deactivate(as, msg->spi);
	else
		as->stay = msg->spi;

	list_del(&msg->queue);
	msg->status = as->done_status;

	dev_dbg(master->dev.parent,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

	atmel_spi_unlock(as);
	msg->complete(msg->context);
	atmel_spi_lock(as);

	as->current_transfer = NULL;
	as->next_transfer = NULL;
	as->done_status = 0;

	/* continue if needed */
	if (list_empty(&as->queue) || as->stopping) {
		if (as->use_pdc)
			atmel_spi_disable_pdc_transfer(as);
	} else {
		atmel_spi_next_message(master);
	}
}

/* Called from IRQ
 * lock is held
 *
 * Must update "current_remaining_bytes" to keep track of data
 * to transfer.
 */
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u8		*txp;
	u8		*rxp;
	u16		*txp16;
	u16		*rxp16;
	unsigned long	xfer_pos = xfer->len - as->current_remaining_bytes;

	if (xfer->rx_buf) {
		if (xfer->bits_per_word > 8) {
			rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
			*rxp16 = spi_readl(as, RDR);
		} else {
			rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
			*rxp = spi_readl(as, RDR);
		}
	} else {
		spi_readl(as, RDR);
	}
	if (xfer->bits_per_word > 8) {
		/* current_remaining_bytes is unsigned, so guard the
		 * subtraction instead of testing for underflow after it */
		if (as->current_remaining_bytes > 2)
			as->current_remaining_bytes -= 2;
		else
			as->current_remaining_bytes = 0;
	} else {
		as->current_remaining_bytes--;
	}

	if (as->current_remaining_bytes) {
		if (xfer->tx_buf) {
			if (xfer->bits_per_word > 8) {
				txp16 = (u16 *)(((u8 *)xfer->tx_buf)
							+ xfer_pos + 2);
				spi_writel(as, TDR, *txp16);
			} else {
				txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
				spi_writel(as, TDR, *txp);
			}
		} else {
			spi_writel(as, TDR, 0);
		}
	}
}
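/*
 * Note that for bits_per_word > 8 each word occupies two bytes of the
 * rx/tx buffers, which is why atmel_spi_transfer() rejects odd buffer
 * lengths in that case.
 */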
/* Tasklet
 * Called from DMA callback + pio transfer and overrun IRQ.
 */
static void atmel_spi_tasklet_func(unsigned long data)
{
	struct spi_master	*master = (struct spi_master *)data;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_transfer	*xfer;

	dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n");

	atmel_spi_lock(as);

	xfer = as->current_transfer;

	if (xfer == NULL)
		/* already been there */
		goto tasklet_out;

	msg = list_entry(as->queue.next, struct spi_message, queue);

	if (as->current_remaining_bytes == 0) {
		if (as->done_status < 0) {
			/* error happened (overrun) */
			if (atmel_spi_use_dma(as, xfer))
				atmel_spi_stop_dma(as);
		} else {
			/* only update length if no error */
			msg->actual_length += xfer->len;
		}

		if (atmel_spi_use_dma(as, xfer))
			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) {
			/* report completed (or erroneous) message */
			atmel_spi_msg_done(master, as, msg, xfer->cs_change);
		} else {
			if (xfer->cs_change) {
				cs_deactivate(as, msg->spi);
				udelay(1);
				cs_activate(as, msg->spi);
			}

			/*
			 * Not done yet. Submit the next transfer.
			 *
			 * FIXME handle protocol options for xfer
			 */
			atmel_spi_dma_next_xfer(master, msg);
		}
	} else {
		/*
		 * Keep going, we still have data to send in
		 * the current transfer.
		 */
		atmel_spi_dma_next_xfer(master, msg);
	}

tasklet_out:
	atmel_spi_unlock(as);
}

/* Interrupt
 *
 * No need for locking in this Interrupt handler: done_status is the
 * only information modified. What we need is the update of this field
 * before the tasklet runs. This is ensured by using a barrier.
 */
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	struct spi_transfer	*xfer;
	int			ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, SPI_BIT(OVRES));
		dev_warn(master->dev.parent, "overrun\n");

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_length will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 *
		 * The tasklet performs all cleanup, with done_status
		 * indicating the error.
		 */
		as->done_status = -EIO;
		smp_wmb();

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		tasklet_schedule(&as->tasklet);

	} else if (pending & SPI_BIT(RDRF)) {
		atmel_spi_lock(as);

		if (as->current_remaining_bytes) {
			ret = IRQ_HANDLED;
			xfer = as->current_transfer;
			atmel_spi_pump_pio_data(as, xfer);
			if (!as->current_remaining_bytes) {
				/* no more data to xfer, kick tasklet */
				spi_writel(as, IDR, pending);
				tasklet_schedule(&as->tasklet);
			}
		}

		atmel_spi_unlock(as);
	} else {
		WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, pending);
	}

	return ret;
}

static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_transfer	*xfer;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	atmel_spi_lock(as);

	xfer = as->current_transfer;
	msg = list_entry(as->queue.next, struct spi_message, queue);

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		int timeout;

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_length will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 *
		 * First, stop the transfer and unmap the DMA buffers.
		 */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
		if (!msg->is_dma_mapped)
			atmel_spi_dma_unmap_xfer(master, xfer);

		/* REVISIT: udelay in irq is unfriendly */
		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
			 spi_readl(as, TCR), spi_readl(as, RCR));

		/*
		 * Clean up DMA registers and make sure the data
		 * registers are empty.
		 */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		spi_writel(as, RCR, 0);
		spi_writel(as, TCR, 0);
		for (timeout = 1000; timeout; timeout--)
			if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
				break;
		if (!timeout)
			dev_warn(master->dev.parent,
				 "timeout waiting for TXEMPTY\n");
		while (spi_readl(as, SR) & SPI_BIT(RDRF))
			spi_readl(as, RDR);

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		as->done_status = -EIO;
		atmel_spi_msg_done(master, as, msg, 0);
	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		if (as->current_remaining_bytes == 0) {
			msg->actual_length += xfer->len;

			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

			/* REVISIT: udelay in irq is unfriendly */
			if (xfer->delay_usecs)
				udelay(xfer->delay_usecs);

			if (atmel_spi_xfer_is_last(msg, xfer)) {
				/* report completed message */
				atmel_spi_msg_done(master, as, msg,
						xfer->cs_change);
			} else {
				if (xfer->cs_change) {
					cs_deactivate(as, msg->spi);
					udelay(1);
					cs_activate(as, msg->spi);
				}

				/*
				 * Not done yet. Submit the next transfer.
				 *
				 * FIXME handle protocol options for xfer
				 */
				atmel_spi_pdc_next_xfer(master, msg);
			}
		} else {
			/*
			 * Keep going, we still have data to send in
			 * the current transfer.
			 */
			atmel_spi_pdc_next_xfer(master, msg);
		}
	}

	atmel_spi_unlock(as);

	return ret;
}

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi	*as;
	struct atmel_spi_device	*asd;
	u32			scbr, csr;
	unsigned int		bits = spi->bits_per_word;
	unsigned long		bus_hz;
	unsigned int		npcs_pin;
	int			ret;

	as = spi_master_get_devdata(spi->master);

	if (as->stopping)
		return -ESHUTDOWN;

	if (spi->chip_select >= spi->master->num_chipselect) {
		dev_dbg(&spi->dev,
				"setup: invalid chipselect %u (%u defined)\n",
				spi->chip_select, spi->master->num_chipselect);
		return -EINVAL;
	}

	/* see notes above re chipselect */
	if (!atmel_spi_is_v2(as)
			&& spi->chip_select == 0
			&& (spi->mode & SPI_CS_HIGH)) {
		dev_dbg(&spi->dev, "setup: can't be active-high\n");
		return -EINVAL;
	}

	/* v1 chips start out at half the peripheral bus speed. */
	bus_hz = clk_get_rate(as->clk);
	if (!atmel_spi_is_v2(as))
		bus_hz /= 2;

	if (spi->max_speed_hz) {
		/*
		 * Calculate the lowest divider that satisfies the
		 * constraint, assuming div32/fdiv/mbz == 0.
		 */
		scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);

		/*
		 * If the resulting divider doesn't fit into the
		 * register bitfield, we can't satisfy the constraint.
		 */
		if (scbr >= (1 << SPI_SCBR_SIZE)) {
			dev_dbg(&spi->dev,
				"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
				spi->max_speed_hz, scbr, bus_hz / 255);
			return -EINVAL;
		}
	} else
		/* speed zero means "as slow as possible" */
		scbr = 0xff;

	csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);
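	/*
	 * Worked example (illustrative numbers): with bus_hz = 60 MHz
	 * and max_speed_hz = 8 MHz, scbr = DIV_ROUND_UP(60 MHz, 8 MHz)
	 * = 8 above, giving an actual bit rate of 60 / 8 = 7.5 MHz,
	 * the fastest rate that does not exceed the requested maximum.
	 */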
	/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
	 *
	 * DLYBCT would add delays between words, slowing down transfers.
	 * It could potentially be useful to cope with DMA bottlenecks, but
	 * in those cases it's probably best to just use a lower bitrate.
	 */
	csr |= SPI_BF(DLYBS, 0);
	csr |= SPI_BF(DLYBCT, 0);

	/* chipselect must have been muxed as GPIO (e.g. in board setup) */
	npcs_pin = (unsigned int)spi->controller_data;

	if (gpio_is_valid(spi->cs_gpio))
		npcs_pin = spi->cs_gpio;

	asd = spi->controller_state;
	if (!asd) {
		asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
		if (!asd)
			return -ENOMEM;

		ret = gpio_request(npcs_pin, dev_name(&spi->dev));
		if (ret) {
			kfree(asd);
			return ret;
		}

		asd->npcs_pin = npcs_pin;
		spi->controller_state = asd;
		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
	} else {
		atmel_spi_lock(as);
		if (as->stay == spi)
			as->stay = NULL;
		cs_deactivate(as, spi);
		atmel_spi_unlock(as);
	}

	asd->csr = csr;

	dev_dbg(&spi->dev,
		"setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
		bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);

	if (!atmel_spi_is_v2(as))
		spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}
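/*
 * Queue a new message: validate each transfer, DMA-map buffers where
 * the engine will need them, then start the queue if it is idle.
 * Called by the SPI core; should not sleep.
 */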
static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct atmel_spi	*as;
	struct spi_transfer	*xfer;
	struct device		*controller = spi->master->dev.parent;
	u8			bits;
	struct atmel_spi_device	*asd;

	as = spi_master_get_devdata(spi->master);

	dev_dbg(controller, "new message %p submitted for %s\n",
			msg, dev_name(&spi->dev));

	if (unlikely(list_empty(&msg->transfers)))
		return -EINVAL;

	if (as->stopping)
		return -ESHUTDOWN;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			dev_dbg(&spi->dev, "missing rx or tx buf\n");
			return -EINVAL;
		}

		if (xfer->bits_per_word) {
			asd = spi->controller_state;
			bits = SPI_BFEXT(BITS, asd->csr);
			if (bits != xfer->bits_per_word - 8) {
				dev_dbg(&spi->dev,
					"you can't yet change bits_per_word in transfers\n");
				return -ENOPROTOOPT;
			}
		}

		if (xfer->bits_per_word > 8) {
			if (xfer->len % 2) {
				dev_dbg(&spi->dev,
					"buffer length must be even when bits_per_word > 8\n");
				return -EINVAL;
			}
		}

		/* FIXME implement these protocol options!! */
		if (xfer->speed_hz < spi->max_speed_hz) {
			dev_dbg(&spi->dev, "can't change speed in transfer\n");
			return -ENOPROTOOPT;
		}

		/*
		 * DMA map early, for performance (empties dcache ASAP) and
		 * better fault reporting.
		 */
		if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer)
			|| as->use_pdc)) {
			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
				return -ENOMEM;
		}
	}

#ifdef VERBOSE
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(controller,
			"  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len,
			xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	}
#endif

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

	atmel_spi_lock(as);
	list_add_tail(&msg->queue, &as->queue);
	if (!as->current_transfer)
		atmel_spi_next_message(spi->master);
	atmel_spi_unlock(as);

	return 0;
}

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi	*as = spi_master_get_devdata(spi->master);
	struct atmel_spi_device	*asd = spi->controller_state;

	if (!asd)
		return;

	atmel_spi_lock(as);
	if (as->stay == spi) {
		as->stay = NULL;
		cs_deactivate(as, spi);
	}
	atmel_spi_unlock(as);

	spi->controller_state = NULL;
	/* free the GPIO that was actually requested in atmel_spi_setup() */
	gpio_free(asd->npcs_pin);
	kfree(asd);
}

static inline unsigned int atmel_get_version(struct atmel_spi *as)
{
	return spi_readl(as, VERSION) & 0x00000fff;
}

static void atmel_get_caps(struct atmel_spi *as)
{
	unsigned int version;

	version = atmel_get_version(as);
	dev_info(&as->pdev->dev, "version: 0x%x\n", version);

	as->caps.is_spi2 = version > 0x121;
	as->caps.has_wdrbt = version >= 0x210;
	as->caps.has_dma_support = version >= 0x212;
}
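/*
 * Example: a controller reporting version 0x210 is treated as SPIv2
 * with WDRBT but without dmaengine support; versions 0x212 and later
 * enable the dmaengine path as well.
 */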
/*-------------------------------------------------------------------------*/

static int atmel_spi_probe(struct platform_device *pdev)
{
	struct resource		*regs;
	int			irq;
	struct clk		*clk;
	int			ret;
	struct spi_master	*master;
	struct atmel_spi	*as;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof(*as));
	if (!master)
		goto out_free;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = master->dev.of_node ? 0 : 4;
	master->setup = atmel_spi_setup;
	master->transfer = atmel_spi_transfer;
	master->cleanup = atmel_spi_cleanup;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);
	INIT_LIST_HEAD(&as->queue);

	as->pdev = pdev;
	as->regs = ioremap(regs->start, resource_size(regs));
	if (!as->regs)
		goto out_free_buffer;
	as->phybase = regs->start;
	as->irq = irq;
	as->clk = clk;

	atmel_get_caps(as);

	as->use_dma = false;
	as->use_pdc = false;
	if (as->caps.has_dma_support) {
		if (atmel_spi_configure_dma(as) == 0)
			as->use_dma = true;
	} else {
		as->use_pdc = true;
	}

	if (as->caps.has_dma_support && !as->use_dma)
		dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");

	if (as->use_pdc) {
		ret = request_irq(irq, atmel_spi_pdc_interrupt, 0,
					dev_name(&pdev->dev), master);
	} else {
		tasklet_init(&as->tasklet, atmel_spi_tasklet_func,
					(unsigned long)master);

		ret = request_irq(irq, atmel_spi_pio_interrupt, 0,
					dev_name(&pdev->dev), master);
	}
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_free_irq;
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	if (as->caps.has_wdrbt) {
		spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
				| SPI_BIT(MSTR));
	} else {
		spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	}

	if (as->use_pdc)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	ret = spi_register_master(master);
	if (ret)
		goto out_free_dma;

	return 0;

out_free_dma:
	if (as->use_dma)
		atmel_spi_release_dma(as);

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	clk_disable_unprepare(clk);
out_free_irq:
	free_irq(irq, master);
out_unmap_regs:
	iounmap(as->regs);
out_free_buffer:
	if (!as->use_pdc)
		tasklet_kill(&as->tasklet);
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	clk_put(clk);
	spi_master_put(master);
	return ret;
}

static int atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_transfer	*xfer;

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	as->stopping = 1;
	if (as->use_dma) {
		atmel_spi_stop_dma(as);
		atmel_spi_release_dma(as);
	}

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	/* Terminate remaining queued transfers */
	list_for_each_entry(msg, &as->queue, queue) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if (!msg->is_dma_mapped
				&& (atmel_spi_use_dma(as, xfer)
					|| as->use_pdc))
				atmel_spi_dma_unmap_xfer(master, xfer);
		}
		msg->status = -ESHUTDOWN;
		msg->complete(msg->context);
	}

	if (!as->use_pdc)
		tasklet_kill(&as->tasklet);
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable_unprepare(as->clk);
	clk_put(as->clk);
	free_irq(as->irq, master);
	iounmap(as->regs);

	spi_unregister_master(master);

	return 0;
}
#ifdef CONFIG_PM

static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	clk_disable_unprepare(as->clk);
	return 0;
}

static int atmel_spi_resume(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	return clk_prepare_enable(as->clk);
}

#else
#define atmel_spi_suspend	NULL
#define atmel_spi_resume	NULL
#endif

#if defined(CONFIG_OF)
static const struct of_device_id atmel_spi_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-spi" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
#endif

static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.owner	= THIS_MODULE,
		.of_match_table	= of_match_ptr(atmel_spi_dt_ids),
	},
	.suspend	= atmel_spi_suspend,
	.resume		= atmel_spi_resume,
	.probe		= atmel_spi_probe,
	.remove		= atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");