/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

#include "spi-pxa2xx.h"

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define MAX_BUSES 3

#define TIMOUT_DFLT		1000

/*
 * Mask of the SSCR1 bits that require an SSP restart when changed:
 * basically everything except the service and interrupt enables.  The
 * PXA270 developer manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO
 * need to be in this list, but the PXA255 developer manual says all
 * bits, presumably not counting the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
}

int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	void __iomem *reg = drv_data->ioaddr;

	do {
		while (read_SSSR(reg) & SSSR_RNE) {
			read_SSDR(reg);
		}
	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}
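
/*
 * PIO FIFO helpers.  The *_writer() helpers push one word into the TX FIFO
 * and return 0 once the FIFO is full or the transfer buffer is exhausted;
 * the *_reader() helpers drain the RX FIFO and return nonzero once all
 * expected data has been received.  The null_* variants clock out zeros or
 * discard incoming data for transfers that provide no tx_buf or rx_buf.
 */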
static int null_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(0, reg);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		read_SSDR(reg);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u8 *)(drv_data->tx), reg);
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = read_SSDR(reg);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u16 *)(drv_data->tx), reg);
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u32 *)(drv_data->tx), reg);
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}
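
/*
 * Advance drv_data->cur_transfer to the next spi_transfer in the current
 * message, or report that the message is complete.  The returned state
 * token is stored in msg->state and consumed by pump_transfers().
 */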
void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	spi_finalize_current_message(drv_data->master);
	drv_data->cur_chip = NULL;
}

static void reset_sccr1(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
	write_SSCR1(sccr1_reg, reg);
}

static void int_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	pxa2xx_spi_flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);

	/* Update the total bytes transferred; the count reflects the
	 * bytes actually read back
	 */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}
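
/*
 * PIO interrupt handler: drain the RX FIFO, refill the TX FIFO, and watch
 * for errors.  A receiver overrun aborts the transfer; a receiver timeout
 * (SSSR_TINT) or a fully drained RX buffer completes it.  Once the TX side
 * is done, the TX FIFO service interrupt is disabled and, on PXA25x (which
 * has no receive timeout), the RX threshold is lowered to match the bytes
 * still outstanding.
 */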
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = read_SSSR(reg) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		write_SSSR(SSSR_TINT, reg);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = read_SSCR1(reg);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, so set up the rx threshold
		 * for the remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {

			sccr1_reg &= ~SSCR1_RFT;

			/* convert the remaining byte count to words */
			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
				/* fall through */
			case 2:
				bytes_left >>= 1;
			}

			if (bytes_left > RX_THRESH_DFLT)
				bytes_left = RX_THRESH_DFLT;

			sccr1_reg |= SSCR1_RxTresh(bytes_left);
		}
		write_SSCR1(sccr1_reg, reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	void __iomem *reg = drv_data->ioaddr;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals, so first check
	 * whether we are runtime-suspended.  If we are, assume the IRQ was
	 * not for us (we should not be runtime-suspended while our
	 * interrupts are enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	sccr1_reg = read_SSCR1(reg);
	status = read_SSSR(reg);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}
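
/*
 * Compute the SSCR0 SCR (serial clock rate) field for a requested bit rate.
 * On PXA25x/CE4100 the bit rate is ssp_clk / (2 * (SCR + 1)) with an 8-bit
 * SCR field; on later SSPs it is ssp_clk / (SCR + 1) with a 12-bit field.
 * The value is returned already shifted into its SSCR0 position.  For
 * example (assuming a 13 MHz SSP clock on a PXA27x-class SSP), a 1 Mbps
 * request yields SCR = 12 and an actual rate of 13 MHz / 13 = 1 Mbps.
 */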
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->max_clk_rate;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
	else
		return ((ssp_clk / rate - 1) & 0xfff) << 8;
}
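
/*
 * Tasklet that walks the transfers of the current message: it applies any
 * per-transfer speed/bits_per_word overrides, picks DMA or PIO (falling
 * back to PIO for transfers longer than MAX_DMA_LEN), reloads SSCR0/SSCR1
 * only when the relevant bits actually change, asserts chip select and
 * finally enables service requests and interrupts.
 */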
static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	void __iomem *reg = drv_data->ioaddr;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of "
				"%u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		if (printk_ratelimit())
			dev_warn(&message->spi->dev, "pump_transfers: "
				"DMA disabled for transfer length %ld "
				"greater than %d\n",
				(long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bits per word on a per-transfer basis */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = ssp_get_clk_div(drv_data, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then must check the
		 * thresholds and burst also */
		if (chip->enable_dma) {
			if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
							message->spi,
							bits, &dma_burst,
							&dma_thresh))
				if (printk_ratelimit())
					dev_warn(&message->spi->dev,
						"pump_transfers: DMA burst "
						"size reduced to match "
						"bits_per_word\n");
		}

		cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}

	message->state = RUNNING_STATE;

	drv_data->dma_mapped = 0;
	if (pxa2xx_spi_dma_is_possible(drv_data->len))
		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		pxa2xx_spi_dma_prepare(drv_data, dma_burst);

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		write_SSSR(drv_data->clear_sr, reg);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	/* see if we need to reload the config registers */
	if ((read_SSCR0(reg) != cr0)
		|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
			(cr1 & SSCR1_CHANGE_MASK)) {

		/* stop the SSP, and update the other bits */
		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
		/* first set CR1 without interrupt and service enables */
		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
		/* restart the SSP */
		write_SSCR0(cr0, reg);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	write_SSCR1(cr1, reg);
}

static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

static int pxa2xx_spi_prepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	pm_runtime_get_sync(&drv_data->pdev->dev);
	return 0;
}

static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Disable the SSP now */
	write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
		    drv_data->ioaddr);

	pm_runtime_mark_last_busy(&drv_data->pdev->dev);
	pm_runtime_put_autosuspend(&drv_data->pdev->dev);
	return 0;
}
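
/*
 * Chip select handling: a cs_control() callback supplied through
 * pxa2xx_spi_chip takes precedence; otherwise a GPIO chip select is used
 * if a valid one was given.  CE4100 uses neither; there the SSP itself
 * selects the slave via the frame field programmed in setup().
 */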
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev,
				"failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned int clk_div;
	uint tx_thres = TX_THRESH_DFLT;
	uint rx_thres = RX_THRESH_DFLT;

	if (!pxa25x_ssp_comp(drv_data)
		&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
				"b/w not 4-32 for type non-PXA25x_SSP\n",
				drv_data->ssp_type, spi->bits_per_word);
		return -EINVAL;
	} else if (pxa25x_ssp_comp(drv_data)
			&& (spi->bits_per_word < 4
			|| spi->bits_per_word > 16)) {
		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
				"b/w not 4-16 for type PXA25x_SSP\n",
				drv_data->ssp_type, spi->bits_per_word);
		return -EINVAL;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"failed setup: can't allocate chip data\n");
			return -ENOMEM;
		}

		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		} else
			chip->gpio_cs = -1;
		chip->enable_dma = 0;
		chip->timeout = TIMOUT_DFLT;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->enable_dma = drv_data->master_info->enable_dma;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	}

	chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev,
				"in setup: DMA burst size reduced to match bits_per_word\n");
		}
	}

	clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
	chip->speed_hz = spi->max_speed_hz;

	chip->cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(spi->bits_per_word > 16 ?
				spi->bits_per_word - 16 : spi->bits_per_word)
			| SSCR0_SSE
			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	/* NOTE: PXA25x_SSP _could_ use external clocking ... */
	if (!pxa25x_ssp_comp(drv_data))
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate
				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");
	else
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate / 2
				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		chip->cr0 |= SSCR0_EDSS;
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	} else {
		dev_err(&spi->dev, "invalid wordsize\n");
		return -ENODEV;
	}
	chip->bits_per_word = spi->bits_per_word;

	spi_set_ctldata(spi, chip);

	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

	return setup_cs(spi, chip, chip_info);
}

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	int status;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -ENODEV;
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	master->dev.parent = &pdev->dev;
	master->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = ssp->port_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
	master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer;
	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;

	drv_data->ssp_type = ssp->type;
	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			     drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_warn(dev, "failed to setup DMA, using PIO\n");
			platform_info->enable_dma = false;
		}
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	drv_data->max_clk_rate = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	write_SSCR0(0, drv_data->ioaddr);
	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
				SSCR1_TxTresh(TX_THRESH_DFLT),
				drv_data->ioaddr);
	write_SSCR0(SSCR0_SCR(2)
			| SSCR0_Motorola
			| SSCR0_DataSize(8),
			drv_data->ioaddr);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, drv_data->ioaddr);
	write_SSPSP(0, drv_data->ioaddr);

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	pxa2xx_spi_dma_resume(drv_data);

	/* Enable the SSP clock */
	clk_prepare_enable(ssp->clk);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#endif
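
/*
 * Runtime PM only gates the SSP functional clock.  prepare_transfer()
 * takes a runtime PM reference before the core processes queued messages
 * and unprepare_transfer() drops it with autosuspend, so the clock is off
 * whenever the controller is idle; ssp_int() returns IRQ_NONE while
 * runtime-suspended, since a shared IRQ cannot then be ours.
 */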
#ifdef CONFIG_PM_RUNTIME
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.owner	= THIS_MODULE,
		.pm	= &pxa2xx_spi_pm_ops,
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};
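
/*
 * Registered at subsys_initcall level rather than with the usual
 * module_platform_driver() so that, when built in, the SPI bus is
 * available before drivers at later initcall levels that may depend on
 * devices behind it (this is the presumed reason; the original code does
 * not state it).
 */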
static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);