/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/of.h>

#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>

/* SPI register offsets */
#define SPI_CR			0x0000
#define SPI_MR			0x0004
#define SPI_RDR			0x0008
#define SPI_TDR			0x000c
#define SPI_SR			0x0010
#define SPI_IER			0x0014
#define SPI_IDR			0x0018
#define SPI_IMR			0x001c
#define SPI_CSR0		0x0030
#define SPI_CSR1		0x0034
#define SPI_CSR2		0x0038
#define SPI_CSR3		0x003c
#define SPI_VERSION		0x00fc
#define SPI_RPR			0x0100
#define SPI_RCR			0x0104
#define SPI_TPR			0x0108
#define SPI_TCR			0x010c
#define SPI_RNPR		0x0110
#define SPI_RNCR		0x0114
#define SPI_TNPR		0x0118
#define SPI_TNCR		0x011c
#define SPI_PTCR		0x0120
#define SPI_PTSR		0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET	0
#define SPI_SPIEN_SIZE		1
#define SPI_SPIDIS_OFFSET	1
#define SPI_SPIDIS_SIZE		1
#define SPI_SWRST_OFFSET	7
#define SPI_SWRST_SIZE		1
#define SPI_LASTXFER_OFFSET	24
#define SPI_LASTXFER_SIZE	1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET		0
#define SPI_MSTR_SIZE		1
#define SPI_PS_OFFSET		1
#define SPI_PS_SIZE		1
#define SPI_PCSDEC_OFFSET	2
#define SPI_PCSDEC_SIZE		1
#define SPI_FDIV_OFFSET		3
#define SPI_FDIV_SIZE		1
#define SPI_MODFDIS_OFFSET	4
#define SPI_MODFDIS_SIZE	1
#define SPI_WDRBT_OFFSET	5
#define SPI_WDRBT_SIZE		1
#define SPI_LLB_OFFSET		7
#define SPI_LLB_SIZE		1
#define SPI_PCS_OFFSET		16
#define SPI_PCS_SIZE		4
#define SPI_DLYBCS_OFFSET	24
#define SPI_DLYBCS_SIZE		8

/* Bitfields in RDR */
#define SPI_RD_OFFSET		0
#define SPI_RD_SIZE		16

/* Bitfields in TDR */
#define SPI_TD_OFFSET		0
#define SPI_TD_SIZE		16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET		0
#define SPI_RDRF_SIZE		1
#define SPI_TDRE_OFFSET		1
#define SPI_TDRE_SIZE		1
#define SPI_MODF_OFFSET		2
#define SPI_MODF_SIZE		1
#define SPI_OVRES_OFFSET	3
#define SPI_OVRES_SIZE		1
#define SPI_ENDRX_OFFSET	4
#define SPI_ENDRX_SIZE		1
#define SPI_ENDTX_OFFSET	5
#define SPI_ENDTX_SIZE		1
#define SPI_RXBUFF_OFFSET	6
#define SPI_RXBUFF_SIZE		1
#define SPI_TXBUFE_OFFSET	7
#define SPI_TXBUFE_SIZE		1
#define SPI_NSSR_OFFSET		8
#define SPI_NSSR_SIZE		1
#define SPI_TXEMPTY_OFFSET	9
#define SPI_TXEMPTY_SIZE	1
#define SPI_SPIENS_OFFSET	16
#define SPI_SPIENS_SIZE		1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET		0
#define SPI_CPOL_SIZE		1
#define SPI_NCPHA_OFFSET	1
#define SPI_NCPHA_SIZE		1
#define SPI_CSAAT_OFFSET	3
#define SPI_CSAAT_SIZE		1
#define SPI_BITS_OFFSET		4
#define SPI_BITS_SIZE		4
#define SPI_SCBR_OFFSET		8
#define SPI_SCBR_SIZE		8
#define SPI_DLYBS_OFFSET	16
#define SPI_DLYBS_SIZE		8
#define SPI_DLYBCT_OFFSET	24
#define SPI_DLYBCT_SIZE		8

/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET	0
#define SPI_RXCTR_SIZE		16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET	0
#define SPI_TXCTR_SIZE		16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET	0
#define SPI_RXNCR_SIZE		16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET	0
#define SPI_TXNCR_SIZE		16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET	0
#define SPI_RXTEN_SIZE		1
#define SPI_RXTDIS_OFFSET	1
#define SPI_RXTDIS_SIZE		1
#define SPI_TXTEN_OFFSET	8
#define SPI_TXTEN_SIZE		1
#define SPI_TXTDIS_OFFSET	9
#define SPI_TXTDIS_SIZE		1

/* Constants for BITS */
#define SPI_BITS_8_BPT		0
#define SPI_BITS_9_BPT		1
#define SPI_BITS_10_BPT		2
#define SPI_BITS_11_BPT		3
#define SPI_BITS_12_BPT		4
#define SPI_BITS_13_BPT		5
#define SPI_BITS_14_BPT		6
#define SPI_BITS_15_BPT		7
#define SPI_BITS_16_BPT		8

/* Bit manipulation macros */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
#define SPI_BF(name, value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name, value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name, value, old) \
	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	  | SPI_BF(name, value))
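
/*
 * Worked example (illustration only): with SPI_PCS_OFFSET = 16 and
 * SPI_PCS_SIZE = 4 as defined above,
 *	SPI_BIT(MSTR)			== 0x00000001
 *	SPI_BF(PCS, 0x0e)		== 0x000e0000
 *	SPI_BFEXT(PCS, 0x000e0000)	== 0x0e
 *	SPI_BFINS(PCS, 0xf, 0)		== 0x000f0000
 */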

/* Use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics would also consider word size and
 * bitrate.
 */
#define DMA_MIN_BYTES	16

#define SPI_DMA_TIMEOUT		(msecs_to_jiffies(1000))

#define AUTOSUSPEND_TIMEOUT	2000

struct atmel_spi_dma {
	struct dma_chan			*chan_rx;
	struct dma_chan			*chan_tx;
	struct scatterlist		sgrx;
	struct scatterlist		sgtx;
	struct dma_async_tx_descriptor	*data_desc_rx;
	struct dma_async_tx_descriptor	*data_desc_tx;

	struct at_dma_slave	dma_slave;
};

struct atmel_spi_caps {
	bool	is_spi2;
	bool	has_wdrbt;
	bool	has_dma_support;
};

/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t		lock;
	unsigned long		flags;

	phys_addr_t		phybase;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;

	struct spi_transfer	*current_transfer;
	int			current_remaining_bytes;
	int			done_status;

	struct completion	xfer_completion;

	/* scratch buffer */
	void			*buffer;
	dma_addr_t		buffer_dma;

	struct atmel_spi_caps	caps;

	bool			use_dma;
	bool			use_pdc;
	/* dmaengine data */
	struct atmel_spi_dma	dma;

	bool			keep_cs;
	bool			cs_active;
};

/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;
	u32			csr;
};

#define BUFFER_SIZE		PAGE_SIZE
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Version 2 of the SPI controller has
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SBCR allows faster clocking
 */
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
	return as->caps.is_spi2;
}

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */

static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	if (atmel_spi_is_v2(as)) {
		spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
		/* For the low SPI version, there is an issue where PDC
		 * transfers on CS1,2,3 need SPI_CSR0.BITS configured the
		 * same as SPI_CSR1,2,3.BITS.
		 */
		spi_writel(as, CSR0, asd->csr);
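		/*
		 * MR.PCS is "one cold" (PCSDEC is not used here): the
		 * selected chip has its bit cleared and all others set,
		 * e.g. chip_select 1 yields PCS = 0b1101, while 0xf means
		 * "no chip selected".
		 */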
		if (as->caps.has_wdrbt) {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(WDRBT)
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		} else {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		}

		mr = spi_readl(as, MR);
		gpio_set_value(asd->npcs_pin, active);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
		if (spi->chip_select != 0)
			gpio_set_value(asd->npcs_pin, active);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (high)" : "",
			mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (low)" : "",
			mr);

	if (atmel_spi_is_v2(as) || spi->chip_select != 0)
		gpio_set_value(asd->npcs_pin, !active);
}

static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
	spin_lock_irqsave(&as->lock, as->flags);
}

static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}

static inline bool atmel_spi_use_dma(struct atmel_spi *as,
				struct spi_transfer *xfer)
{
	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}

static int atmel_spi_dma_slave_config(struct atmel_spi *as,
				struct dma_slave_config *slave_config,
				u8 bits_per_word)
{
	int err = 0;

	if (bits_per_word > 8) {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
	slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
	slave_config->src_maxburst = 1;
	slave_config->dst_maxburst = 1;
	slave_config->device_fc = false;
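
	/*
	 * Both channels target the SPI data registers: TX copies memory
	 * into TDR and RX drains RDR into memory, one word per request
	 * (maxburst = 1), with the DMA controller acting as the flow
	 * controller (device_fc = false).
	 */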
	slave_config->direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
	}

	slave_config->direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
	}

	return err;
}

static int atmel_spi_configure_dma(struct atmel_spi *as)
{
	struct dma_slave_config	slave_config;
	struct device *dev = &as->pdev->dev;
	int err;

	dma_cap_mask_t mask;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(as->dma.chan_tx)) {
		err = PTR_ERR(as->dma.chan_tx);
		if (err == -EPROBE_DEFER) {
			dev_warn(dev, "no DMA channel available at the moment\n");
			return err;
		}
		dev_err(dev,
			"DMA TX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	/*
	 * No reason to check EPROBE_DEFER here since we have already
	 * requested the tx channel. If it fails here, it's for another
	 * reason.
	 */
	as->dma.chan_rx = dma_request_slave_channel(dev, "rx");

	if (!as->dma.chan_rx) {
		dev_err(dev,
			"DMA RX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	err = atmel_spi_dma_slave_config(as, &slave_config, 8);
	if (err)
		goto error;

	dev_info(&as->pdev->dev,
			"Using %s (tx) and %s (rx) for DMA transfers\n",
			dma_chan_name(as->dma.chan_tx),
			dma_chan_name(as->dma.chan_rx));
	return 0;
error:
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (!IS_ERR(as->dma.chan_tx))
		dma_release_channel(as->dma.chan_tx);
	return err;
}

static void atmel_spi_stop_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		dmaengine_terminate_all(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dmaengine_terminate_all(as->dma.chan_tx);
}

static void atmel_spi_release_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dma_release_channel(as->dma.chan_tx);
}

/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
	struct spi_master	*master = data;
	struct atmel_spi	*as = spi_master_get_devdata(master);

	complete(&as->xfer_completion);
}

/*
 * Next transfer using PIO.
 */
static void atmel_spi_next_xfer_pio(struct spi_master *master,
				struct spi_transfer *xfer)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");

	/* Make sure data is not remaining in RDR */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	if (xfer->tx_buf) {
		if (xfer->bits_per_word > 8)
			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
		else
			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
	} else {
		spi_writel(as, TDR, 0);
	}

	dev_dbg(master->dev.parent,
		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}

/*
 * Submit next transfer for DMA.
 */
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
				struct spi_transfer *xfer,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct dma_chan		*rxchan = as->dma.chan_rx;
	struct dma_chan		*txchan = as->dma.chan_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	struct dma_slave_config	slave_config;
	dma_cookie_t		cookie;
	u32	len = *plen;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/* release lock for DMA operations */
	atmel_spi_unlock(as);
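
	/*
	 * When one direction has no user buffer, it is bounced through
	 * the DMA-coherent scratch buffer instead; that caps each round
	 * at BUFFER_SIZE bytes, and the caller loops on the remainder.
	 */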
	/* prepare the RX dma transfer */
	sg_init_table(&as->dma.sgrx, 1);
	if (xfer->rx_buf) {
		as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
	} else {
		as->dma.sgrx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	/* prepare the TX dma transfer */
	sg_init_table(&as->dma.sgtx, 1);
	if (xfer->tx_buf) {
		as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
	} else {
		as->dma.sgtx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
	}

	sg_dma_len(&as->dma.sgtx) = len;
	sg_dma_len(&as->dma.sgrx) = len;

	*plen = len;

	if (atmel_spi_dma_slave_config(as, &slave_config, 8))
		goto err_exit;

	/* Send both scatterlists */
	rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		goto err_dma;

	txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		goto err_dma;

	dev_dbg(master->dev.parent,
		"  start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
		xfer->rx_buf, (unsigned long long)xfer->rx_dma);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(OVRES));

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = master;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	/* take back lock */
	atmel_spi_lock(as);
	return 0;

err_dma:
	spi_writel(as, IDR, SPI_BIT(OVRES));
	atmel_spi_stop_dma(as);
err_exit:
	atmel_spi_lock(as);
	return -ENOMEM;
}

static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		*rx_dma = xfer->rx_dma + xfer->len - *plen;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - *plen;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
					   as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}

static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	u32			scbr, csr;
	unsigned long		bus_hz;

	/* v1 chips start out at half the peripheral bus speed. */
	bus_hz = clk_get_rate(as->clk);
	if (!atmel_spi_is_v2(as))
		bus_hz /= 2;

	/*
	 * Calculate the lowest divider that satisfies the
	 * constraint, assuming div32/fdiv/mbz == 0.
	 */
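	/*
	 * Illustrative numbers only: with bus_hz = 132 MHz and a 10 MHz
	 * request, scbr = DIV_ROUND_UP(132000000, 10000000) = 14, so the
	 * actual clock is 132 MHz / 14 ~= 9.43 MHz, the fastest rate
	 * that does not exceed the request.
	 */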
	if (xfer->speed_hz)
		scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
	else
		/*
		 * This can happen if max_speed is null.
		 * In this case, we set the lowest possible speed.
		 */
		scbr = 0xff;

	/*
	 * If the resulting divider doesn't fit into the
	 * register bitfield, we can't satisfy the constraint.
	 */
	if (scbr >= (1 << SPI_SCBR_SIZE)) {
		dev_err(&spi->dev,
			"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz/255);
		return -EINVAL;
	}
	if (scbr == 0) {
		dev_err(&spi->dev,
			"setup: %d Hz too high, scbr %u; max %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz);
		return -EINVAL;
	}
	csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
	csr = SPI_BFINS(SCBR, scbr, csr);
	spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}

/*
 * Submit next transfer for PDC.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer *xfer)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len;
	dma_addr_t		tx_dma, rx_dma;

	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

	len = as->current_remaining_bytes;
	atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
	as->current_remaining_bytes -= len;

	spi_writel(as, RPR, rx_dma);
	spi_writel(as, TPR, tx_dma);

	if (msg->spi->bits_per_word > 8)
		len >>= 1;
	spi_writel(as, RCR, len);
	spi_writel(as, TCR, len);

	dev_dbg(&msg->spi->dev,
		"  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf,
		(unsigned long long)xfer->tx_dma, xfer->rx_buf,
		(unsigned long long)xfer->rx_dma);
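
	/*
	 * The PDC double-buffers each direction: RPR/RCR and TPR/TCR
	 * describe the current buffer, while RNPR/RNCR and TNPR/TNCR
	 * hold a "next" buffer that the hardware switches to when the
	 * current counter reaches zero, so a second chunk can be queued
	 * here before the transfer is started.
	 */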
	if (as->current_remaining_bytes) {
		len = as->current_remaining_bytes;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->current_remaining_bytes -= len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
			xfer, xfer->len, xfer->tx_buf,
			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
			(unsigned long long)xfer->rx_dma);
	}

	/* REVISIT: We're waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for ENDTX in one transfer and
	 * then start waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device	*dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for the dma
		 * mapping */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev,
				nonconst_tx, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						xfer->tx_dma, xfer->len,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}

/* Called from IRQ
 *
 * Must update "current_remaining_bytes" to keep track of data
 * to transfer.
 */
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u8		*rxp;
	u16		*rxp16;
	unsigned long	xfer_pos = xfer->len - as->current_remaining_bytes;

	if (xfer->rx_buf) {
		if (xfer->bits_per_word > 8) {
			rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
			*rxp16 = spi_readl(as, RDR);
		} else {
			rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
			*rxp = spi_readl(as, RDR);
		}
	} else {
		spi_readl(as, RDR);
	}
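	/* 9..16 bit words occupy two bytes each in the rx/tx buffers */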
	if (xfer->bits_per_word > 8) {
		if (as->current_remaining_bytes > 2)
			as->current_remaining_bytes -= 2;
		else
			as->current_remaining_bytes = 0;
	} else {
		as->current_remaining_bytes--;
	}
}

/* Interrupt
 *
 * No need for locking in this Interrupt handler: done_status is the
 * only information modified.
 */
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	struct spi_transfer	*xfer;
	int			ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, SPI_BIT(OVRES));
		dev_warn(master->dev.parent, "overrun\n");

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_len will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 */
		as->done_status = -EIO;
		smp_wmb();

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		complete(&as->xfer_completion);

	} else if (pending & SPI_BIT(RDRF)) {
		atmel_spi_lock(as);

		if (as->current_remaining_bytes) {
			ret = IRQ_HANDLED;
			xfer = as->current_transfer;
			atmel_spi_pump_pio_data(as, xfer);
			if (!as->current_remaining_bytes)
				spi_writel(as, IDR, pending);

			complete(&as->xfer_completion);
		}

		atmel_spi_unlock(as);
	} else {
		WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, pending);
	}

	return ret;
}

static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		as->done_status = -EIO;

		complete(&as->xfer_completion);

	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		complete(&as->xfer_completion);
	}

	return ret;
}

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi	*as;
	struct atmel_spi_device	*asd;
	u32			csr;
	unsigned int		bits = spi->bits_per_word;
	unsigned int		npcs_pin;
	int			ret;

	as = spi_master_get_devdata(spi->master);

	/* see notes above re chipselect */
	if (!atmel_spi_is_v2(as)
			&& spi->chip_select == 0
			&& (spi->mode & SPI_CS_HIGH)) {
		dev_dbg(&spi->dev, "setup: can't be active-high\n");
		return -EINVAL;
	}
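
	/*
	 * CSR.BITS encodes the word size as (bits_per_word - 8); see the
	 * SPI_BITS_*_BPT constants above (8 bpw -> 0, ..., 16 bpw -> 8).
	 */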
	csr = SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);

	/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
	 *
	 * DLYBCT would add delays between words, slowing down transfers.
	 * It could potentially be useful to cope with DMA bottlenecks, but
	 * in those cases it's probably best to just use a lower bitrate.
	 */
	csr |= SPI_BF(DLYBS, 0);
	csr |= SPI_BF(DLYBCT, 0);

	/* chipselect must have been muxed as GPIO (e.g. in board setup) */
	npcs_pin = (unsigned long)spi->controller_data;

	if (gpio_is_valid(spi->cs_gpio))
		npcs_pin = spi->cs_gpio;

	asd = spi->controller_state;
	if (!asd) {
		asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
		if (!asd)
			return -ENOMEM;

		ret = gpio_request(npcs_pin, dev_name(&spi->dev));
		if (ret) {
			kfree(asd);
			return ret;
		}

		asd->npcs_pin = npcs_pin;
		spi->controller_state = asd;
		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
	}

	asd->csr = csr;

	dev_dbg(&spi->dev,
		"setup: bpw %u mode 0x%x -> csr%d %08x\n",
		bits, spi->mode, spi->chip_select, csr);

	if (!atmel_spi_is_v2(as))
		spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}

static int atmel_spi_one_transfer(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer *xfer)
{
	struct atmel_spi	*as;
	struct spi_device	*spi = msg->spi;
	u8			bits;
	u32			len;
	struct atmel_spi_device	*asd;
	int			timeout;
	int			ret;

	as = spi_master_get_devdata(master);

	if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
		dev_dbg(&spi->dev, "missing rx or tx buf\n");
		return -EINVAL;
	}

	if (xfer->bits_per_word) {
		asd = spi->controller_state;
		bits = SPI_BFEXT(BITS, asd->csr);
		if (bits != xfer->bits_per_word - 8) {
			dev_dbg(&spi->dev,
				"you can't yet change bits_per_word in transfers\n");
			return -ENOPROTOOPT;
		}
	}

	/*
	 * DMA map early, for performance (empties dcache ASAP) and
	 * better fault reporting.
	 */
	if ((!msg->is_dma_mapped)
		&& (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
		if (atmel_spi_dma_map_xfer(as, xfer) < 0)
			return -ENOMEM;
	}

	atmel_spi_set_xfer_speed(as, msg->spi, xfer);

	as->done_status = 0;
	as->current_transfer = xfer;
	as->current_remaining_bytes = xfer->len;
	while (as->current_remaining_bytes) {
		reinit_completion(&as->xfer_completion);

		if (as->use_pdc) {
			atmel_spi_pdc_next_xfer(master, msg, xfer);
		} else if (atmel_spi_use_dma(as, xfer)) {
			len = as->current_remaining_bytes;
			ret = atmel_spi_next_xfer_dma_submit(master,
								xfer, &len);
			if (ret) {
				dev_err(&spi->dev,
					"unable to use DMA, falling back to PIO\n");
				atmel_spi_next_xfer_pio(master, xfer);
			} else {
				as->current_remaining_bytes -= len;
				if (as->current_remaining_bytes < 0)
					as->current_remaining_bytes = 0;
			}
		} else {
			atmel_spi_next_xfer_pio(master, xfer);
		}

		/* interrupts are disabled, so free the lock for schedule */
		atmel_spi_unlock(as);
		ret = wait_for_completion_timeout(&as->xfer_completion,
							SPI_DMA_TIMEOUT);
		atmel_spi_lock(as);
		if (WARN_ON(ret == 0)) {
			dev_err(&spi->dev,
				"spi transfer timeout, err %d\n", ret);
			as->done_status = -EIO;
		} else {
			ret = 0;
		}

		if (as->done_status)
			break;
	}

	if (as->done_status) {
		if (as->use_pdc) {
			dev_warn(master->dev.parent,
				"overrun (%u/%u remaining)\n",
				spi_readl(as, TCR), spi_readl(as, RCR));

			/*
			 * Clean up DMA registers and make sure the data
			 * registers are empty.
			 */
			spi_writel(as, RNCR, 0);
			spi_writel(as, TNCR, 0);
			spi_writel(as, RCR, 0);
			spi_writel(as, TCR, 0);
			for (timeout = 1000; timeout; timeout--)
				if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
					break;
			if (!timeout)
				dev_warn(master->dev.parent,
					 "timeout waiting for TXEMPTY");
			while (spi_readl(as, SR) & SPI_BIT(RDRF))
				spi_readl(as, RDR);

			/* Clear any overrun happening while cleaning up */
			spi_readl(as, SR);

		} else if (atmel_spi_use_dma(as, xfer)) {
			atmel_spi_stop_dma(as);
		}

		if (!msg->is_dma_mapped
			&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
			atmel_spi_dma_unmap_xfer(master, xfer);

		return 0;

	} else {
		/* only update length if no error */
		msg->actual_length += xfer->len;
	}

	if (!msg->is_dma_mapped
		&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
		atmel_spi_dma_unmap_xfer(master, xfer);

	if (xfer->delay_usecs)
		udelay(xfer->delay_usecs);
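
	/*
	 * cs_change on the last transfer of a message means "leave the
	 * chipselect asserted for the next message"; on any earlier
	 * transfer it toggles the chipselect before the next transfer.
	 */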
	if (xfer->cs_change) {
		if (list_is_last(&xfer->transfer_list,
				 &msg->transfers)) {
			as->keep_cs = true;
		} else {
			as->cs_active = !as->cs_active;
			if (as->cs_active)
				cs_activate(as, msg->spi);
			else
				cs_deactivate(as, msg->spi);
		}
	}

	return 0;
}

static int atmel_spi_transfer_one_message(struct spi_master *master,
						struct spi_message *msg)
{
	struct atmel_spi *as;
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	int ret = 0;

	as = spi_master_get_devdata(master);

	dev_dbg(&spi->dev, "new message %p submitted for %s\n",
		msg, dev_name(&spi->dev));

	atmel_spi_lock(as);
	cs_activate(as, spi);

	as->cs_active = true;
	as->keep_cs = false;

	msg->status = 0;
	msg->actual_length = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = atmel_spi_one_transfer(master, msg, xfer);
		if (ret)
			goto msg_done;
	}

	if (as->use_pdc)
		atmel_spi_disable_pdc_transfer(as);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(&spi->dev,
			"  xfer %p: len %u tx %p/%pad rx %p/%pad\n",
			xfer, xfer->len,
			xfer->tx_buf, &xfer->tx_dma,
			xfer->rx_buf, &xfer->rx_dma);
	}

msg_done:
	if (!as->keep_cs)
		cs_deactivate(as, msg->spi);

	atmel_spi_unlock(as);

	msg->status = as->done_status;
	spi_finalize_current_message(spi->master);

	return ret;
}

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi_device	*asd = spi->controller_state;

	if (!asd)
		return;

	spi->controller_state = NULL;
	/* free the GPIO actually requested in atmel_spi_setup() */
	gpio_free(asd->npcs_pin);
	kfree(asd);
}

static inline unsigned int atmel_get_version(struct atmel_spi *as)
{
	return spi_readl(as, VERSION) & 0x00000fff;
}

static void atmel_get_caps(struct atmel_spi *as)
{
	unsigned int version;

	version = atmel_get_version(as);
	dev_info(&as->pdev->dev, "version: 0x%x\n", version);

	as->caps.is_spi2 = version > 0x121;
	as->caps.has_wdrbt = version >= 0x210;
	as->caps.has_dma_support = version >= 0x212;
}

/*-------------------------------------------------------------------------*/

static int atmel_spi_probe(struct platform_device *pdev)
{
	struct resource		*regs;
	int			irq;
	struct clk		*clk;
	int			ret;
	struct spi_master	*master;
	struct atmel_spi	*as;

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = devm_clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof(*as));
	if (!master)
		goto out_free;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = master->dev.of_node ? 0 : 4;
	master->setup = atmel_spi_setup;
	master->transfer_one_message = atmel_spi_transfer_one_message;
	master->cleanup = atmel_spi_cleanup;
	master->auto_runtime_pm = true;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);

	as->pdev = pdev;
	as->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(as->regs)) {
		ret = PTR_ERR(as->regs);
		goto out_free_buffer;
	}
	as->phybase = regs->start;
	as->irq = irq;
	as->clk = clk;

	init_completion(&as->xfer_completion);

	atmel_get_caps(as);

	as->use_dma = false;
	as->use_pdc = false;
	if (as->caps.has_dma_support) {
		ret = atmel_spi_configure_dma(as);
		if (ret == 0)
			as->use_dma = true;
		else if (ret == -EPROBE_DEFER)
			goto out_free_buffer;
	} else {
		as->use_pdc = true;
	}

	if (as->caps.has_dma_support && !as->use_dma)
		dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");

	if (as->use_pdc) {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
					0, dev_name(&pdev->dev), master);
	} else {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
					0, dev_name(&pdev->dev), master);
	}
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_free_irq;
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	if (as->caps.has_wdrbt) {
		spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
				| SPI_BIT(MSTR));
	} else {
		spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	}

	if (as->use_pdc)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto out_free_dma;

	return 0;

out_free_dma:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (as->use_dma)
		atmel_spi_release_dma(as);

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
out_free_buffer:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	spi_master_put(master);
	return ret;
}

static int atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	pm_runtime_get_sync(&pdev->dev);

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	if (as->use_dma) {
		atmel_spi_stop_dma(as);
		atmel_spi_release_dma(as);
	}

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable_unprepare(as->clk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int atmel_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_disable_unprepare(as->clk);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int atmel_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	pinctrl_pm_select_default_state(dev);

	return clk_prepare_enable(as->clk);
}

static int atmel_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	/* Stop the queue running */
	ret = spi_master_suspend(master);
	if (ret) {
		dev_warn(dev, "cannot suspend master\n");
		return ret;
	}

	if (!pm_runtime_suspended(dev))
		atmel_spi_runtime_suspend(dev);

	return 0;
}

static int atmel_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = atmel_spi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	/* Start the queue running */
	ret = spi_master_resume(master);
	if (ret)
		dev_err(dev, "problem starting queue (%d)\n", ret);

	return ret;
}

static const struct dev_pm_ops atmel_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
	SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
			   atmel_spi_runtime_resume, NULL)
};
#define ATMEL_SPI_PM_OPS	(&atmel_spi_pm_ops)
#else
#define ATMEL_SPI_PM_OPS	NULL
#endif

#if defined(CONFIG_OF)
static const struct of_device_id atmel_spi_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-spi" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
#endif

static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.pm	= ATMEL_SPI_PM_OPS,
		.of_match_table	= of_match_ptr(atmel_spi_dt_ids),
	},
	.probe		= atmel_spi_probe,
	.remove		= atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");