// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <linux/platform_data/spi-davinci.h>

#define CS_DEFAULT	0xFF

#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)
#define SPIDAT1_WDEL		BIT(10)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_POWERDOWN_MASK	BIT(8)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50

#define DMA_MIN_BYTES	16
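
/*
 * Transfers shorter than DMA_MIN_BYTES are always carried out in interrupt
 * or polled mode; see davinci_spi_can_dma() below.
 */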

/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;
	resource_size_t		pbase;
	void __iomem		*base;
	u32			irq;
	struct completion	done;

	const void		*tx;
	void			*rx;
	int			rcount;
	int			wcount;

	struct dma_chan		*dma_rx;
	struct dma_chan		*dma_tx;

	struct davinci_spi_platform_data pdata;

	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	u8			*bytes_per_word;

	u8			prescaler_limit;
};

static struct davinci_spi_config davinci_spi_default_cfg;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u8 *rx = dspi->rx;
		*rx++ = (u8)data;
		dspi->rx = rx;
	}
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u16 *rx = dspi->rx;
		*rx++ = (u16)data;
		dspi->rx = rx;
	}
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u8 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u16 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg = spi->controller_data;
	u8 chip_sel = spi->chip_select;
	u16 spidat1 = CS_DEFAULT;

	dspi = spi_master_get_devdata(spi->master);

	/* program delay transfers if tx_delay is non zero */
	if (spicfg && spicfg->wdelay)
		spidat1 |= SPIDAT1_WDEL;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (spi->cs_gpiod) {
		if (value == BITBANG_CS_ACTIVE)
			gpiod_set_value(spi->cs_gpiod, 1);
		else
			gpiod_set_value(spi->cs_gpiod, 0);
	} else {
		if (value == BITBANG_CS_ACTIVE) {
			if (!(spi->mode & SPI_CS_WORD))
				spidat1 |= SPIDAT1_CSHOLD_MASK;
			spidat1 &= ~(0x1 << chip_sel);
		}
	}

	iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}

/**
 * davinci_spi_get_prescale - Calculates the correct prescale value
 * @dspi: the controller data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale value for easy programming into SPI registers,
 * or a negative error number if a valid prescaler cannot be derived.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
					   u32 max_speed_hz)
{
	int ret;

	/* Subtract 1 to match what will be programmed into SPI register. */
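	/*
	 * Hypothetical example (clock rate assumed): with a 150 MHz functional
	 * clock and max_speed_hz = 10 MHz, DIV_ROUND_UP(150000000, 10000000) - 1
	 * = 14, and the hardware then divides the clock by (prescale + 1) = 15,
	 * giving exactly 10 MHz.
	 */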
	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;

	if (ret < dspi->prescaler_limit || ret > 255)
		return -EINVAL;

	return ret;
}

/**
 * davinci_spi_setup_transfer - This function will determine transfer method
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines the data transfer method (8/16/32 bit transfer).
 * It will also set the SPI Clock Control register according to
 * the SPI slave device frequency.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
				      struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	dspi = spi_master_get_devdata(spi->master);
	spicfg = spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then set it default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8) {
		dspi->get_rx = davinci_spi_rx_buf_u8;
		dspi->get_tx = davinci_spi_tx_buf_u8;
		dspi->bytes_per_word[spi->chip_select] = 1;
	} else {
		dspi->get_rx = davinci_spi_rx_buf_u16;
		dspi->get_tx = davinci_spi_tx_buf_u16;
		dspi->bytes_per_word[spi->chip_select] = 2;
	}

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	prescale = davinci_spi_get_prescale(dspi, hz);
	if (prescale < 0)
		return prescale;

	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Assume wdelay is used only on SPI peripherals that have this field
	 * in the SPIFMTn register and when it's configured from board file or DT.
	 */
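	/* WDELAY occupies bits 29:24 of SPIFMTn (see SPIFMT_WDELAY_MASK). */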
	if (spicfg->wdelay)
		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
				& SPIFMT_WDELAY_MASK);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *    (distinct from SPI_3WIRE, with just one data wire;
	 *    or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */
	if (dspi->version == SPI_VERSION_2) {
		u32 delay = 0;

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
				& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
				& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
				& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
				& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, dspi->base + SPIDELAY);
	}

	iowrite32(spifmt, dspi->base + SPIFMT0);

	return 0;
}

static int davinci_spi_of_setup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	struct device_node *np = spi->dev.of_node;
	struct davinci_spi *dspi = spi_master_get_devdata(spi->master);
	u32 prop;

	if (spicfg == NULL && np) {
		spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
		if (!spicfg)
			return -ENOMEM;
		*spicfg = davinci_spi_default_cfg;
		/* override with dt configured values */
		if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
			spicfg->wdelay = (u8)prop;
		spi->controller_data = spicfg;

		if (dspi->dma_rx && dspi->dma_tx)
			spicfg->io_type = SPI_IO_TYPE_DMA;
	}

	return 0;
}

/**
 * davinci_spi_setup - This function will set the default transfer method
 * @spi: spi device on which data transfer to be done
 *
 * This function sets the default transfer method.
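 * It also configures the chip-select pin function in SPIPC0, enables the
 * SPIENA (ready) pin function when SPI_READY is requested, and switches
 * internal loopback on or off according to SPI_LOOP.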
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	struct davinci_spi *dspi;
	struct device_node *np = spi->dev.of_node;
	bool internal_cs = true;

	dspi = spi_master_get_devdata(spi->master);

	if (!(spi->mode & SPI_NO_CS)) {
		if (np && spi->cs_gpiod)
			internal_cs = false;

		if (internal_cs)
			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return davinci_spi_of_setup(spi);
}

static void davinci_spi_cleanup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;

	spi->controller_data = NULL;
	if (spi->dev.of_node)
		kfree(spicfg);
}

static bool davinci_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	bool can_dma = false;

	if (spicfg)
		can_dma = (spicfg->io_type == SPI_IO_TYPE_DMA) &&
			(xfer->len >= DMA_MIN_BYTES) &&
			!is_vmalloc_addr(xfer->rx_buf) &&
			!is_vmalloc_addr(xfer->tx_buf);

	return can_dma;
}

static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_err(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_err(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_err(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_err(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_err(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_err(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_err(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}

/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @dspi: the controller data
 *
 * This function will check the SPIFLG register and handle any events that are
 * detected there
 */
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	buf = ioread32(dspi->base + SPIBUF);

	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = ioread32(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = ioread32(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
		iowrite32(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}

static void davinci_spi_dma_rx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->rcount = 0;

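	/* Signal completion only once both the rx and tx DMA callbacks have run. */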
	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

static void davinci_spi_dma_tx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->wcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

/**
 * davinci_spi_bufs - function which will handle transfer data
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function will put the data to be transferred into the data register
 * of the SPI controller and then wait until the completion is signalled by
 * the IRQ handler.
 */
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	int data_type, ret = -ENOMEM;
	u32 tx_data, spidat1;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	/* convert len to words based on bits_per_word */
	data_type = dspi->bytes_per_word[spi->chip_select];

	dspi->tx = t->tx_buf;
	dspi->rx = t->rx_buf;
	dspi->wcount = t->len / data_type;
	dspi->rcount = dspi->wcount;

	spidat1 = ioread32(dspi->base + SPIDAT1);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	reinit_completion(&dspi->done);

	if (!davinci_spi_can_dma(spi->master, spi, t)) {
		if (spicfg->io_type != SPI_IO_TYPE_POLL)
			set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
		/* start the transfer */
		dspi->wcount--;
		tx_data = dspi->get_tx(dspi);
		spidat1 &= 0xFFFF0000;
		spidat1 |= tx_data & 0xFFFF;
		iowrite32(spidat1, dspi->base + SPIDAT1);
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
			.src_addr_width = data_type,
			.src_maxburst = 1,
		};
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
			.dst_addr_width = data_type,
			.dst_maxburst = 1,
		};
		struct dma_async_tx_descriptor *rxdesc;
		struct dma_async_tx_descriptor *txdesc;

		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);

		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
				t->rx_sg.sgl, t->rx_sg.nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto err_desc;

		if (!t->tx_buf) {
			/* To avoid errors when doing rx-only transfers with
			 * many SG entries (> 20), use the rx buffer as the
			 * dummy tx buffer so that dma reloads are done at the
			 * same time for rx and tx.
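			 * The shift register transmits while it receives, so
			 * a tx descriptor is needed even for read-only
			 * transfers.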
			 */
			t->tx_sg.sgl = t->rx_sg.sgl;
			t->tx_sg.nents = t->rx_sg.nents;
		}

		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
				t->tx_sg.sgl, t->tx_sg.nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto err_desc;

		rxdesc->callback = davinci_spi_dma_rx_callback;
		rxdesc->callback_param = (void *)dspi;
		txdesc->callback = davinci_spi_dma_tx_callback;
		txdesc->callback_param = (void *)dspi;

		if (pdata->cshold_bug)
			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);

		dmaengine_submit(rxdesc);
		dmaengine_submit(txdesc);

		dma_async_issue_pending(dspi->dma_rx);
		dma_async_issue_pending(dspi->dma_tx);

		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		if (wait_for_completion_timeout(&dspi->done, HZ) == 0)
			errors = SPIFLG_TIMEOUT_MASK;
	} else {
		while (dspi->rcount > 0 || dspi->wcount > 0) {
			errors = davinci_spi_process_events(dspi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
	if (davinci_spi_can_dma(spi->master, spi, t))
		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(dspi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (dspi->rcount != 0 || dspi->wcount != 0) {
		dev_err(&spi->dev, "SPI data transfer error\n");
		return -EIO;
	}

	return t->len;

err_desc:
	return ret;
}

/**
 * dummy_thread_fn - dummy thread function
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * This is to satisfy the request_threaded_irq() API so that the irq
 * handler is called in interrupt context.
 */
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
	return IRQ_HANDLED;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * The ISR determines whether the interrupt is for a read or a write and acts
 * accordingly. If the transfer count has not yet reached zero, the next word
 * is queued; once it reaches zero, completion is signalled so that the
 * davinci_spi_bufs() function can go ahead.
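 * On any reported error, the SPI interrupts are masked and completion is
 * signalled immediately so that the caller can report the failure.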
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *data)
{
	struct davinci_spi *dspi = data;
	int status;

	status = davinci_spi_process_events(dspi);
	if (unlikely(status != 0))
		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if ((!dspi->rcount && !dspi->wcount) || status)
		complete(&dspi->done);

	return IRQ_HANDLED;
}

static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	dspi->dma_rx = dma_request_chan(sdev, "rx");
	if (IS_ERR(dspi->dma_rx))
		return PTR_ERR(dspi->dma_rx);

	dspi->dma_tx = dma_request_chan(sdev, "tx");
	if (IS_ERR(dspi->dma_tx)) {
		dma_release_channel(dspi->dma_rx);
		return PTR_ERR(dspi->dma_tx);
	}

	return 0;
}

#if defined(CONFIG_OF)

/* OF SPI data structure */
struct davinci_spi_of_data {
	u8	version;
	u8	prescaler_limit;
};

static const struct davinci_spi_of_data dm6441_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data da830_spi_data = {
	.version = SPI_VERSION_2,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data keystone_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 0,
};

static const struct of_device_id davinci_spi_of_match[] = {
	{
		.compatible = "ti,dm6441-spi",
		.data = &dm6441_spi_data,
	},
	{
		.compatible = "ti,da830-spi",
		.data = &da830_spi_data,
	},
	{
		.compatible = "ti,keystone-spi",
		.data = &keystone_spi_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);

/**
 * spi_davinci_get_pdata - Get platform data from DTS binding
 * @pdev: ptr to platform data
 * @dspi: ptr to driver data
 *
 * Parses and populates pdata in dspi from device tree bindings.
 *
 * NOTE: Not all platform data params are supported currently.
 */
static int spi_davinci_get_pdata(struct platform_device *pdev,
				 struct davinci_spi *dspi)
{
	struct device_node *node = pdev->dev.of_node;
	const struct davinci_spi_of_data *spi_data;
	struct davinci_spi_platform_data *pdata;
	unsigned int num_cs, intr_line = 0;

	pdata = &dspi->pdata;

	spi_data = device_get_match_data(&pdev->dev);

	pdata->version = spi_data->version;
	pdata->prescaler_limit = spi_data->prescaler_limit;
	/*
	 * default num_cs is 1 and all chipsel are internal to the chip
	 * indicated by chip_sel being NULL or cs_gpios being NULL or
	 * set to -ENOENT. num-cs includes internal as well as gpios.
	 */
	num_cs = 1;
	of_property_read_u32(node, "num-cs", &num_cs);
	pdata->num_chipselect = num_cs;
	of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
	pdata->intr_line = intr_line;
	return 0;
}
#else
static int spi_davinci_get_pdata(struct platform_device *pdev,
				 struct davinci_spi *dspi)
{
	return -ENODEV;
}
#endif

/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * According to the Linux Device Model this function will be invoked by Linux
 * with a platform_device struct which contains the device specific info.
 * This function will map the SPI controller's memory, register the IRQ,
 * reset the SPI controller and set its registers to default values.
 * It will invoke spi_bitbang_start to create a work queue so that the client
 * driver can register its transfer method with the work queue.
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r;
	int ret = 0;
	u32 spipc0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	platform_set_drvdata(pdev, master);

	dspi = spi_master_get_devdata(master);

	if (dev_get_platdata(&pdev->dev)) {
		pdata = dev_get_platdata(&pdev->dev);
		dspi->pdata = *pdata;
	} else {
		/* update dspi pdata with that from the DT */
		ret = spi_davinci_get_pdata(pdev, dspi);
		if (ret < 0)
			goto free_master;
	}

	/* pdata in dspi is now updated and point pdata to that */
	pdata = &dspi->pdata;

	dspi->bytes_per_word = devm_kcalloc(&pdev->dev,
					    pdata->num_chipselect,
					    sizeof(*dspi->bytes_per_word),
					    GFP_KERNEL);
	if (dspi->bytes_per_word == NULL) {
		ret = -ENOMEM;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	dspi->pbase = r->start;

	dspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(dspi->base)) {
		ret = PTR_ERR(dspi->base);
		goto free_master;
	}

	init_completion(&dspi->done);

	ret = platform_get_irq(pdev, 0);
	if (ret == 0)
		ret = -EINVAL;
	if (ret < 0)
		goto free_master;
	dspi->irq = ret;

	ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
				dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
	if (ret)
		goto free_master;

	dspi->bitbang.master = master;

	dspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dspi->clk)) {
		ret = -ENODEV;
		goto free_master;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto free_master;

	master->use_gpio_descriptors = true;
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
	master->setup = davinci_spi_setup;
	master->cleanup = davinci_spi_cleanup;
	master->can_dma = davinci_spi_can_dma;

	dspi->bitbang.chipselect = davinci_spi_chipselect;
	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
	dspi->prescaler_limit = pdata->prescaler_limit;
	dspi->version = pdata->version;

	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD;
	if (dspi->version == SPI_VERSION_2)
		dspi->bitbang.flags |= SPI_READY;

	dspi->bitbang.txrx_bufs = davinci_spi_bufs;

	ret = davinci_spi_request_dma(dspi);
	if (ret == -EPROBE_DEFER) {
		goto free_clk;
	} else if (ret) {
		dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret);
		dspi->dma_rx = NULL;
		dspi->dma_tx = NULL;
	}

	dspi->get_rx = davinci_spi_rx_buf_u8;
	dspi->get_tx = davinci_spi_tx_buf_u8;

	/* Reset In/OUT SPI module */
	iowrite32(0, dspi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, dspi->base + SPIGCR0);

	/*
	 * Set up SPIPC0.
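	 * Only the MISO, MOSI and CLK pin functions are enabled here;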
	 * CS and ENA init is done in davinci_spi_setup.
	 */
	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
	iowrite32(spipc0, dspi->base + SPIPC0);

	if (pdata->intr_line)
		iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);

	iowrite32(CS_DEFAULT, dspi->base + SPIDEF);

	/* master mode default */
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	ret = spi_bitbang_start(&dspi->bitbang);
	if (ret)
		goto free_dma;

	dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);

	return ret;

free_dma:
	if (dspi->dma_rx) {
		dma_release_channel(dspi->dma_rx);
		dma_release_channel(dspi->dma_tx);
	}
free_clk:
	clk_disable_unprepare(dspi->clk);
free_master:
	spi_master_put(master);
err:
	return ret;
}

/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function does the reverse of davinci_spi_probe.
 * It will free the IRQ and the SPI controller's memory region.
 * It will also call spi_bitbang_stop to destroy the work queue which was
 * created by spi_bitbang_start.
 */
static int davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *dspi;
	struct spi_master *master;

	master = platform_get_drvdata(pdev);
	dspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&dspi->bitbang);

	clk_disable_unprepare(dspi->clk);

	if (dspi->dma_rx) {
		dma_release_channel(dspi->dma_rx);
		dma_release_channel(dspi->dma_tx);
	}

	spi_master_put(master);
	return 0;
}

static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.of_match_table = of_match_ptr(davinci_spi_of_match),
	},
	.probe = davinci_spi_probe,
	.remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");