/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <linux/platform_data/spi-davinci.h>

#define CS_DEFAULT	0xFF

#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)
#define SPIDAT1_WDEL		BIT(10)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_POWERDOWN_MASK	BIT(8)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50

#define DMA_MIN_BYTES	16

/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;
	resource_size_t		pbase;
	void __iomem		*base;
	u32			irq;
	struct completion	done;

	const void		*tx;
	void			*rx;
	int			rcount;
	int			wcount;

	struct dma_chan		*dma_rx;
	struct dma_chan		*dma_tx;

	struct davinci_spi_platform_data pdata;

	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	u8			*bytes_per_word;

	u8			prescaler_limit;
};

static struct davinci_spi_config davinci_spi_default_cfg;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u8 *rx = dspi->rx;

		*rx++ = (u8)data;
		dspi->rx = rx;
	}
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u16 *rx = dspi->rx;

		*rx++ = (u16)data;
		dspi->rx = rx;
	}
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u8 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u16 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg = spi->controller_data;
	u8 chip_sel = spi->chip_select;
	u16 spidat1 = CS_DEFAULT;

	dspi = spi_master_get_devdata(spi->master);

	/* program delay transfers if tx_delay is non zero */
	if (spicfg && spicfg->wdelay)
		spidat1 |= SPIDAT1_WDEL;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (spi->cs_gpiod) {
		/*
		 * FIXME: is this code ever executed? This host does not
		 * set SPI_MASTER_GPIO_SS so this chipselect callback should
		 * not get called from the SPI core when we are using
		 * GPIOs for chip select.
		 */
		if (value == BITBANG_CS_ACTIVE)
			gpiod_set_value(spi->cs_gpiod, 1);
		else
			gpiod_set_value(spi->cs_gpiod, 0);
	} else {
		if (value == BITBANG_CS_ACTIVE) {
			if (!(spi->mode & SPI_CS_WORD))
				spidat1 |= SPIDAT1_CSHOLD_MASK;

			spidat1 &= ~(0x1 << chip_sel);
		}
	}

	iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}

/**
 * davinci_spi_get_prescale - Calculates the correct prescale value
 * @dspi: the controller data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale value for easy programming into SPI registers
 * or a negative error number if a valid prescaler cannot be obtained.
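 *
 * Worked example (the 150 MHz module clock is an assumed figure, purely for
 * illustration): with max_speed_hz = 10 MHz,
 * DIV_ROUND_UP(150000000, 10000000) - 1 = 14 is returned, and the hardware
 * then divides the module clock by (14 + 1), i.e. SPI clock =
 * 150 MHz / 15 = 10 MHz, which does not exceed the requested maximum.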
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
					   u32 max_speed_hz)
{
	int ret;

	/* Subtract 1 to match what will be programmed into SPI register. */
	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;

	if (ret < dspi->prescaler_limit || ret > 255)
		return -EINVAL;

	return ret;
}

/**
 * davinci_spi_setup_transfer - This function will determine transfer method
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines the data transfer method (8-bit or 16-bit
 * transfer). It will also set the SPI Clock Control register according to
 * SPI slave device freq.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	dspi = spi_master_get_devdata(spi->master);
	spicfg = spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then set it default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8) {
		dspi->get_rx = davinci_spi_rx_buf_u8;
		dspi->get_tx = davinci_spi_tx_buf_u8;
		dspi->bytes_per_word[spi->chip_select] = 1;
	} else {
		dspi->get_rx = davinci_spi_rx_buf_u16;
		dspi->get_tx = davinci_spi_tx_buf_u16;
		dspi->bytes_per_word[spi->chip_select] = 2;
	}

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	prescale = davinci_spi_get_prescale(dspi, hz);
	if (prescale < 0)
		return prescale;

	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Assume wdelay is used only on SPI peripherals that have this field
	 * in the SPIFMTn register and when it's configured from board file or DT.
	 */
	if (spicfg->wdelay)
		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
				& SPIFMT_WDELAY_MASK);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */
	if (dspi->version == SPI_VERSION_2) {

		u32 delay = 0;

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
				& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
				& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
				& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
				& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, dspi->base + SPIDELAY);
	}

	iowrite32(spifmt, dspi->base + SPIFMT0);

	return 0;
}

static int davinci_spi_of_setup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	struct device_node *np = spi->dev.of_node;
	struct davinci_spi *dspi = spi_master_get_devdata(spi->master);
	u32 prop;

	if (spicfg == NULL && np) {
		spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
		if (!spicfg)
			return -ENOMEM;
		*spicfg = davinci_spi_default_cfg;
		/* override with dt configured values */
		if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
			spicfg->wdelay = (u8)prop;
		spi->controller_data = spicfg;

		if (dspi->dma_rx && dspi->dma_tx)
			spicfg->io_type = SPI_IO_TYPE_DMA;
	}

	return 0;
}

/**
 * davinci_spi_setup - This function will set default transfer method
 * @spi: spi device on which data transfer to be done
 *
 * This function sets the default transfer method.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	struct davinci_spi *dspi;
	struct device_node *np = spi->dev.of_node;
	bool internal_cs = true;

	dspi = spi_master_get_devdata(spi->master);

	if (!(spi->mode & SPI_NO_CS)) {
		if (np && spi->cs_gpiod)
			internal_cs = false;

		if (internal_cs)
			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return davinci_spi_of_setup(spi);
}

static void davinci_spi_cleanup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;

	spi->controller_data = NULL;
	if (spi->dev.of_node)
		kfree(spicfg);
}

static bool davinci_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	bool can_dma = false;

	if (spicfg)
		can_dma = (spicfg->io_type == SPI_IO_TYPE_DMA) &&
			(xfer->len >= DMA_MIN_BYTES) &&
			!is_vmalloc_addr(xfer->rx_buf) &&
			!is_vmalloc_addr(xfer->tx_buf);

	return can_dma;
}

static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_err(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_err(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_err(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_err(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_err(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_err(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_err(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}

/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @dspi: the controller data
 *
 * This function will check the SPIFLG register and handle any events that are
 * detected there
 */
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	buf = ioread32(dspi->base + SPIBUF);

	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = ioread32(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = ioread32(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
		iowrite32(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}

static void davinci_spi_dma_rx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->rcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

static void davinci_spi_dma_tx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->wcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

/**
 * davinci_spi_bufs - function which will handle transfer data
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function will put data to be transferred into the data register
 * of the SPI controller and then wait until the completion is marked
 * by the IRQ handler.
 */
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	int data_type, ret = -ENOMEM;
	u32 tx_data, spidat1;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	/* convert len to words based on bits_per_word */
	data_type = dspi->bytes_per_word[spi->chip_select];

	dspi->tx = t->tx_buf;
	dspi->rx = t->rx_buf;
	dspi->wcount = t->len / data_type;
	dspi->rcount = dspi->wcount;

	spidat1 = ioread32(dspi->base + SPIDAT1);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	reinit_completion(&dspi->done);

	if (!davinci_spi_can_dma(spi->master, spi, t)) {
		if (spicfg->io_type != SPI_IO_TYPE_POLL)
			set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
		/* start the transfer */
		dspi->wcount--;
		tx_data = dspi->get_tx(dspi);
		spidat1 &= 0xFFFF0000;
		spidat1 |= tx_data & 0xFFFF;
		iowrite32(spidat1, dspi->base + SPIDAT1);
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
			.src_addr_width = data_type,
			.src_maxburst = 1,
		};
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
			.dst_addr_width = data_type,
			.dst_maxburst = 1,
		};
		struct dma_async_tx_descriptor *rxdesc;
		struct dma_async_tx_descriptor *txdesc;

		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);

		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
				t->rx_sg.sgl, t->rx_sg.nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto err_desc;

		if (!t->tx_buf) {
			/* To avoid errors when doing rx-only transfers with
			 * many SG entries (> 20), use the rx buffer as the
			 * dummy tx buffer so that dma reloads are done at the
			 * same time for rx and tx.
			 */
			t->tx_sg.sgl = t->rx_sg.sgl;
			t->tx_sg.nents = t->rx_sg.nents;
		}

		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
				t->tx_sg.sgl, t->tx_sg.nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto err_desc;

		rxdesc->callback = davinci_spi_dma_rx_callback;
		rxdesc->callback_param = (void *)dspi;
		txdesc->callback = davinci_spi_dma_tx_callback;
		txdesc->callback_param = (void *)dspi;

		if (pdata->cshold_bug)
			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);

		dmaengine_submit(rxdesc);
		dmaengine_submit(txdesc);

		dma_async_issue_pending(dspi->dma_rx);
		dma_async_issue_pending(dspi->dma_tx);

		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		if (wait_for_completion_timeout(&dspi->done, HZ) == 0)
			errors = SPIFLG_TIMEOUT_MASK;
	} else {
		while (dspi->rcount > 0 || dspi->wcount > 0) {
			errors = davinci_spi_process_events(dspi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
	if (davinci_spi_can_dma(spi->master, spi, t))
		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(dspi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (dspi->rcount != 0 || dspi->wcount != 0) {
		dev_err(&spi->dev, "SPI data transfer error\n");
		return -EIO;
	}

	return t->len;

err_desc:
	return ret;
}

/**
 * dummy_thread_fn - dummy thread function
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * This is to satisfy the request_threaded_irq() API so that the irq
 * handler is called in interrupt context.
 */
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
	return IRQ_HANDLED;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * ISR will determine that interrupt arrives either for READ or WRITE command.
 * According to command it will do the appropriate action. It will check
 * transfer length and if it is not zero then dispatch transfer command again.
 * If transfer length is zero then it will indicate the COMPLETION so that
 * davinci_spi_bufs function can go ahead.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *data)
{
	struct davinci_spi *dspi = data;
	int status;

	status = davinci_spi_process_events(dspi);
	if (unlikely(status != 0))
		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if ((!dspi->rcount && !dspi->wcount) || status)
		complete(&dspi->done);

	return IRQ_HANDLED;
}

static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	dspi->dma_rx = dma_request_chan(sdev, "rx");
	if (IS_ERR(dspi->dma_rx))
		return PTR_ERR(dspi->dma_rx);

	dspi->dma_tx = dma_request_chan(sdev, "tx");
	if (IS_ERR(dspi->dma_tx)) {
		dma_release_channel(dspi->dma_rx);
		return PTR_ERR(dspi->dma_tx);
	}

	return 0;
}

#if defined(CONFIG_OF)

/* OF SPI data structure */
struct davinci_spi_of_data {
	u8	version;
	u8	prescaler_limit;
};

static const struct davinci_spi_of_data dm6441_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data da830_spi_data = {
	.version = SPI_VERSION_2,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data keystone_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 0,
};

static const struct of_device_id davinci_spi_of_match[] = {
	{
		.compatible = "ti,dm6441-spi",
		.data = &dm6441_spi_data,
	},
	{
		.compatible = "ti,da830-spi",
		.data = &da830_spi_data,
	},
	{
		.compatible = "ti,keystone-spi",
		.data = &keystone_spi_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);

/**
 * spi_davinci_get_pdata - Get platform data from DTS binding
 * @pdev: ptr to platform device
 * @dspi: ptr to driver data
 *
 * Parses and populates pdata in dspi from device tree bindings.
 *
 * NOTE: Not all platform data params are supported currently.
 */
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	struct device_node *node = pdev->dev.of_node;
	struct davinci_spi_of_data *spi_data;
	struct davinci_spi_platform_data *pdata;
	unsigned int num_cs, intr_line = 0;
	const struct of_device_id *match;

	pdata = &dspi->pdata;

	match = of_match_device(davinci_spi_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	spi_data = (struct davinci_spi_of_data *)match->data;

	pdata->version = spi_data->version;
	pdata->prescaler_limit = spi_data->prescaler_limit;
	/*
	 * default num_cs is 1 and all chipsel are internal to the chip
	 * indicated by chip_sel being NULL or cs_gpios being NULL or
	 * set to -ENOENT. num-cs includes internal as well as gpios.
	 */
	num_cs = 1;
	of_property_read_u32(node, "num-cs", &num_cs);
	pdata->num_chipselect = num_cs;
	of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
	pdata->intr_line = intr_line;
	return 0;
}
#else
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	return -ENODEV;
}
#endif

/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * According to Linux Device Model this function will be invoked by Linux
 * with platform_device struct which contains the device specific info.
 * This function will map the SPI controller's memory, register the IRQ,
 * reset the SPI controller and set its registers to default values.
 * It will invoke spi_bitbang_start to create a work queue so that client
 * drivers can register a transfer method to the work queue.
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r;
	int ret = 0;
	u32 spipc0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	platform_set_drvdata(pdev, master);

	dspi = spi_master_get_devdata(master);

	if (dev_get_platdata(&pdev->dev)) {
		pdata = dev_get_platdata(&pdev->dev);
		dspi->pdata = *pdata;
	} else {
		/* update dspi pdata with that from the DT */
		ret = spi_davinci_get_pdata(pdev, dspi);
		if (ret < 0)
			goto free_master;
	}

	/* pdata in dspi is now updated and point pdata to that */
	pdata = &dspi->pdata;

	dspi->bytes_per_word = devm_kcalloc(&pdev->dev,
					    pdata->num_chipselect,
					    sizeof(*dspi->bytes_per_word),
					    GFP_KERNEL);
	if (dspi->bytes_per_word == NULL) {
		ret = -ENOMEM;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	dspi->pbase = r->start;

	dspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(dspi->base)) {
		ret = PTR_ERR(dspi->base);
		goto free_master;
	}

	init_completion(&dspi->done);

	ret = platform_get_irq(pdev, 0);
	if (ret == 0)
		ret = -EINVAL;
	if (ret < 0)
		goto free_master;
	dspi->irq = ret;

	ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
				dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
	if (ret)
		goto free_master;

	dspi->bitbang.master = master;

	dspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dspi->clk)) {
		ret = -ENODEV;
		goto free_master;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto free_master;

	master->use_gpio_descriptors = true;
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
	master->flags = SPI_MASTER_MUST_RX;
	master->setup = davinci_spi_setup;
	master->cleanup = davinci_spi_cleanup;
	master->can_dma = davinci_spi_can_dma;

	dspi->bitbang.chipselect = davinci_spi_chipselect;
	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
	dspi->prescaler_limit = pdata->prescaler_limit;
	dspi->version = pdata->version;

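	/*
	 * Extra mode bits advertised to the SPI core (spi_bitbang always adds
	 * SPI_CPOL and SPI_CPHA on top of these); SPI_READY additionally
	 * requires the ENA handshake pin that only version 2 of the IP has.
	 */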
	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD;
	if (dspi->version == SPI_VERSION_2)
		dspi->bitbang.flags |= SPI_READY;

	dspi->bitbang.txrx_bufs = davinci_spi_bufs;

	ret = davinci_spi_request_dma(dspi);
	if (ret == -EPROBE_DEFER) {
		goto free_clk;
	} else if (ret) {
		dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret);
		dspi->dma_rx = NULL;
		dspi->dma_tx = NULL;
	}

	dspi->get_rx = davinci_spi_rx_buf_u8;
	dspi->get_tx = davinci_spi_tx_buf_u8;

	/* Reset In/OUT SPI module */
	iowrite32(0, dspi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, dspi->base + SPIGCR0);

	/* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
	iowrite32(spipc0, dspi->base + SPIPC0);

	if (pdata->intr_line)
		iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);

	iowrite32(CS_DEFAULT, dspi->base + SPIDEF);

	/* master mode default */
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	ret = spi_bitbang_start(&dspi->bitbang);
	if (ret)
		goto free_dma;

	dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);

	return ret;

free_dma:
	if (dspi->dma_rx) {
		dma_release_channel(dspi->dma_rx);
		dma_release_channel(dspi->dma_tx);
	}
free_clk:
	clk_disable_unprepare(dspi->clk);
free_master:
	spi_master_put(master);
err:
	return ret;
}

/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function will do the reverse action of the davinci_spi_probe function.
 * It will free the IRQ and the SPI controller's memory region.
 * It will also call spi_bitbang_stop to destroy the work queue which was
 * created by spi_bitbang_start.
 */
static int davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *dspi;
	struct spi_master *master;

	master = platform_get_drvdata(pdev);
	dspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&dspi->bitbang);

	clk_disable_unprepare(dspi->clk);
	spi_master_put(master);

	if (dspi->dma_rx) {
		dma_release_channel(dspi->dma_rx);
		dma_release_channel(dspi->dma_tx);
	}

	return 0;
}

static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.of_match_table = of_match_ptr(davinci_spi_of_match),
	},
	.probe = davinci_spi_probe,
	.remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");