// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * au1550 psc spi controller driver
 * may work also with au1200, au1210, au1250
 * will not work on au1000, au1100 and au1500 (no full spi controller there)
 *
 * Copyright (c) 2006 ATRON electronic GmbH
 * Author: Jan Nikitenko <jan.nikitenko@gmail.com>
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

#include <asm/mach-au1x00/au1550_spi.h>

/* use dma transfers by default; clear via module parameter to force pio */
static unsigned usedma = 1;
module_param(usedma, uint, 0644);

/*
#define AU1550_SPI_DEBUG_LOOPBACK
*/

#define AU1550_SPI_DBDMA_DESCRIPTORS 1
#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U

/* per-controller driver state, stored as spi_master devdata */
struct au1550_spi {
	struct spi_bitbang bitbang;

	volatile psc_spi_t __iomem *regs;	/* mapped PSC register block */
	int irq;

	/* current transfer bookkeeping (bytes) */
	unsigned len;
	unsigned tx_count;
	unsigned rx_count;
	const u8 *tx;
	u8 *rx;

	/* handlers chosen per bits_per_word by au1550_spi_bits_handlers_set() */
	void (*rx_word)(struct au1550_spi *hw);
	void (*tx_word)(struct au1550_spi *hw);
	int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
	irqreturn_t (*irq_callback)(struct au1550_spi *hw);

	/* signalled from irq context when a transfer finishes or errors */
	struct completion master_done;

	unsigned usedma;
	u32 dma_tx_id;
	u32 dma_rx_id;
	u32 dma_tx_ch;
	u32 dma_rx_ch;

	/* fallback rx buffer for dma when the caller supplies no rx_buf */
	u8 *dma_rx_tmpbuf;
	unsigned dma_rx_tmpbuf_size;
	u32 dma_rx_tmpbuf_addr;

	struct spi_master *master;
	struct device *dev;
	struct au1550_spi_info *pdata;
	struct resource *ioarea;
};

/* we use an 8-bit memory device for dma transfers to/from spi fifo */
static dbdev_tab_t au1550_spi_mem_dbdev =
{
	.dev_id =
DBDMA_MEM_CHAN,
	.dev_flags		= DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC,
	.dev_tsize		= 0,
	.dev_devwidth		= 8,
	.dev_physaddr		= 0x00000000,
	.dev_intlevel		= 0,
	.dev_intpolarity	= 0
};

static int ddma_memid;	/* id to above mem dma device */

static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);

/*
 * compute BRG and DIV bits to setup spi clock based on main input clock rate
 * that was specified in platform data structure
 * according to au1550 datasheet:
 *    psc_tempclk = psc_mainclk / (2 << DIV)
 *    spiclk = psc_tempclk / (2 * (BRG + 1))
 *    BRG valid range is 4..63
 *    DIV valid range is 0..3
 * out-of-range requests are clamped to the nearest achievable rate
 */
static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz)
{
	u32 mainclk_hz = hw->pdata->mainclk_hz;
	u32 div, brg;

	for (div = 0; div < 4; div++) {
		brg = mainclk_hz / speed_hz / (4 << div);
		/* now we have BRG+1 in brg, so count with that */
		if (brg < (4 + 1)) {
			brg = (4 + 1);	/* speed_hz too big */
			break;		/* set lowest brg (div is == 0) */
		}
		if (brg <= (63 + 1))
			break;		/* we have valid brg and div */
	}
	if (div == 4) {
		div = 3;		/* speed_hz too small */
		brg = (63 + 1);		/* set highest brg and div */
	}
	brg--;
	return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div);
}

/* mask every spi event interrupt, then acknowledge any pending events */
static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw)
{
	hw->regs->psc_spimsk =
		  PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO
		| PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO
		| PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD;
	wmb(); /* drain writebuffer */

	hw->regs->psc_spievent =
		  PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO
		| PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO
		| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD;
	wmb(); /* drain writebuffer */
}

/* clear both fifos and busy-wait until the controller reports them reset */
static void au1550_spi_reset_fifos(struct au1550_spi *hw)
{
	u32 pcr;

	hw->regs->psc_spipcr =
PSC_SPIPCR_RC | PSC_SPIPCR_TC;
	wmb(); /* drain writebuffer */
	do {
		pcr = hw->regs->psc_spipcr;
		wmb(); /* drain writebuffer */
	} while (pcr != 0);	/* hardware clears the bits when done */
}

/*
 * dma transfers are used for the most common spi word size of 8-bits
 * we cannot easily change already set up dma channels' width, so if we wanted
 * dma support for more than 8-bit words (up to 24 bits), we would need to
 * setup dma channels from scratch on each spi transfer, based on bits_per_word
 * instead we have pre set up 8 bit dma channels supporting spi 4 to 8 bits
 * transfers, and 9 to 24 bits spi transfers will be done in pio irq based mode
 * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set()
 */

/*
 * spi_bitbang chipselect hook: (de)assert CS via platform callbacks and,
 * on activation, reprogram mode bits, word length and baud rate
 */
static void au1550_spi_chipsel(struct spi_device *spi, int value)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
	u32 cfg, stat;

	switch (value) {
	case BITBANG_CS_INACTIVE:
		if (hw->pdata->deactivate_cs)
			hw->pdata->deactivate_cs(hw->pdata, spi->chip_select,
					cspol);
		break;

	case BITBANG_CS_ACTIVE:
		au1550_spi_bits_handlers_set(hw, spi->bits_per_word);

		/* device must be disabled while cfg is rewritten */
		cfg = hw->regs->psc_spicfg;
		wmb(); /* drain writebuffer */
		hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
		wmb(); /* drain writebuffer */

		if (spi->mode & SPI_CPOL)
			cfg |= PSC_SPICFG_BI;
		else
			cfg &= ~PSC_SPICFG_BI;
		if (spi->mode & SPI_CPHA)
			cfg &= ~PSC_SPICFG_CDE;
		else
			cfg |= PSC_SPICFG_CDE;

		if (spi->mode & SPI_LSB_FIRST)
			cfg |= PSC_SPICFG_MLF;
		else
			cfg &= ~PSC_SPICFG_MLF;

		/* dma only supports word sizes up to 8 bits, see above */
		if (hw->usedma && spi->bits_per_word <= 8)
			cfg &= ~PSC_SPICFG_DD_DISABLE;
		else
			cfg |= PSC_SPICFG_DD_DISABLE;
		cfg = PSC_SPICFG_CLR_LEN(cfg);
		cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word);

		cfg = PSC_SPICFG_CLR_BAUD(cfg);
		cfg &= ~PSC_SPICFG_SET_DIV(3);
		cfg |=
au1550_spi_baudcfg(hw, spi->max_speed_hz);

		hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE;
		wmb(); /* drain writebuffer */
		do {
			stat = hw->regs->psc_spistat;
			wmb(); /* drain writebuffer */
		} while ((stat & PSC_SPISTAT_DR) == 0);	/* wait device ready */

		if (hw->pdata->activate_cs)
			hw->pdata->activate_cs(hw->pdata, spi->chip_select,
					cspol);
		break;
	}
}

/*
 * spi_bitbang setup_transfer hook: program word length and clock for the
 * coming transfer (per-transfer values override the device defaults);
 * returns 0 or -EINVAL when no usable clock rate was given
 */
static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	unsigned bpw, hz;
	u32 cfg, stat;

	if (t) {
		bpw = t->bits_per_word;
		hz = t->speed_hz;
	} else {
		bpw = spi->bits_per_word;
		hz = spi->max_speed_hz;
	}

	if (!hz)
		return -EINVAL;

	au1550_spi_bits_handlers_set(hw, spi->bits_per_word);

	/* device must be disabled while cfg is rewritten */
	cfg = hw->regs->psc_spicfg;
	wmb(); /* drain writebuffer */
	hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
	wmb(); /* drain writebuffer */

	if (hw->usedma && bpw <= 8)
		cfg &= ~PSC_SPICFG_DD_DISABLE;
	else
		cfg |= PSC_SPICFG_DD_DISABLE;
	cfg = PSC_SPICFG_CLR_LEN(cfg);
	cfg |= PSC_SPICFG_SET_LEN(bpw);

	cfg = PSC_SPICFG_CLR_BAUD(cfg);
	cfg &= ~PSC_SPICFG_SET_DIV(3);
	cfg |= au1550_spi_baudcfg(hw, hz);

	hw->regs->psc_spicfg = cfg;
	wmb(); /* drain writebuffer */

	if (cfg & PSC_SPICFG_DE_ENABLE) {
		do {
			stat = hw->regs->psc_spistat;
			wmb(); /* drain writebuffer */
		} while ((stat & PSC_SPISTAT_DR) == 0);
	}

	au1550_spi_reset_fifos(hw);
	au1550_spi_mask_ack_all(hw);
	return 0;
}

/*
 * for dma spi transfers, we have to setup rx channel, otherwise there is
 * no reliable way how to recognize that spi transfer is done
 * dma complete callbacks are called before real spi transfer is finished
 * and if only tx dma channel is set up (and rx fifo overflow event masked)
 * spi master done event irq is not generated unless rx fifo is
empty (emptied) 277 * so we need rx tmp buffer to use for rx dma if user does not provide one 278 */ 279 static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) 280 { 281 hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL); 282 if (!hw->dma_rx_tmpbuf) 283 return -ENOMEM; 284 hw->dma_rx_tmpbuf_size = size; 285 hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, 286 size, DMA_FROM_DEVICE); 287 if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) { 288 kfree(hw->dma_rx_tmpbuf); 289 hw->dma_rx_tmpbuf = 0; 290 hw->dma_rx_tmpbuf_size = 0; 291 return -EFAULT; 292 } 293 return 0; 294 } 295 296 static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw) 297 { 298 dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr, 299 hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE); 300 kfree(hw->dma_rx_tmpbuf); 301 hw->dma_rx_tmpbuf = 0; 302 hw->dma_rx_tmpbuf_size = 0; 303 } 304 305 static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) 306 { 307 struct au1550_spi *hw = spi_master_get_devdata(spi->master); 308 dma_addr_t dma_tx_addr; 309 dma_addr_t dma_rx_addr; 310 u32 res; 311 312 hw->len = t->len; 313 hw->tx_count = 0; 314 hw->rx_count = 0; 315 316 hw->tx = t->tx_buf; 317 hw->rx = t->rx_buf; 318 dma_tx_addr = t->tx_dma; 319 dma_rx_addr = t->rx_dma; 320 321 /* 322 * check if buffers are already dma mapped, map them otherwise: 323 * - first map the TX buffer, so cache data gets written to memory 324 * - then map the RX buffer, so that cache entries (with 325 * soon-to-be-stale data) get removed 326 * use rx buffer in place of tx if tx buffer was not provided 327 * use temp rx buffer (preallocated or realloc to fit) for rx dma 328 */ 329 if (t->tx_buf) { 330 if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ 331 dma_tx_addr = dma_map_single(hw->dev, 332 (void *)t->tx_buf, 333 t->len, DMA_TO_DEVICE); 334 if (dma_mapping_error(hw->dev, dma_tx_addr)) 335 dev_err(hw->dev, "tx dma map error\n"); 336 } 337 } 338 339 if (t->rx_buf) { 340 if 
(t->rx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
			dma_rx_addr = dma_map_single(hw->dev,
					(void *)t->rx_buf,
					t->len, DMA_FROM_DEVICE);
			/* NOTE(review): mapping error only logged, transfer
			 * still proceeds — see matching tx path above */
			if (dma_mapping_error(hw->dev, dma_rx_addr))
				dev_err(hw->dev, "rx dma map error\n");
		}
	} else {
		/* no rx buffer from caller: fall back to the tmp buffer,
		 * growing it first if this transfer does not fit */
		if (t->len > hw->dma_rx_tmpbuf_size) {
			int ret;

			au1550_spi_dma_rxtmp_free(hw);
			ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len,
					AU1550_SPI_DMA_RXTMP_MINSIZE));
			if (ret < 0)
				return ret;
		}
		hw->rx = hw->dma_rx_tmpbuf;
		dma_rx_addr = hw->dma_rx_tmpbuf_addr;
		dma_sync_single_for_device(hw->dev, dma_rx_addr,
				t->len, DMA_FROM_DEVICE);
	}

	if (!t->tx_buf) {
		/* transmit from the rx buffer when no tx data was given */
		dma_sync_single_for_device(hw->dev, dma_rx_addr,
				t->len, DMA_BIDIRECTIONAL);
		hw->tx = hw->rx;
	}

	/* put buffers on the ring */
	res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
			t->len, DDMA_FLAGS_IE);
	if (!res)
		dev_err(hw->dev, "rx dma put dest error\n");

	res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
			t->len, DDMA_FLAGS_IE);
	if (!res)
		dev_err(hw->dev, "tx dma put source error\n");

	au1xxx_dbdma_start(hw->dma_rx_ch);
	au1xxx_dbdma_start(hw->dma_tx_ch);

	/* by default enable nearly all events interrupt */
	hw->regs->psc_spimsk = PSC_SPIMSK_SD;
	wmb(); /* drain writebuffer */

	/* start the transfer */
	hw->regs->psc_spipcr = PSC_SPIPCR_MS;
	wmb(); /* drain writebuffer */

	wait_for_completion(&hw->master_done);

	au1xxx_dbdma_stop(hw->dma_tx_ch);
	au1xxx_dbdma_stop(hw->dma_rx_ch);

	if (!t->rx_buf) {
		/* using the temporal preallocated and premapped buffer */
		dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len,
				DMA_FROM_DEVICE);
	}
	/* unmap buffers if mapped above */
	if (t->rx_buf && t->rx_dma == 0)
		dma_unmap_single(hw->dev, dma_rx_addr, t->len,
			DMA_FROM_DEVICE);
	if (t->tx_buf && t->tx_dma == 0)
		dma_unmap_single(hw->dev,
dma_tx_addr, t->len,
			DMA_TO_DEVICE);

	/* report the shorter direction as the number of bytes transferred */
	return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
}

/*
 * irq handler used while a dma transfer is in flight: on error events the
 * dma channels are stopped and the residues give the byte counts; on the
 * master-done event the full length was transferred
 */
static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
{
	u32 stat, evnt;

	stat = hw->regs->psc_spistat;
	evnt = hw->regs->psc_spievent;
	wmb(); /* drain writebuffer */
	if ((stat & PSC_SPISTAT_DI) == 0) {
		dev_err(hw->dev, "Unexpected IRQ!\n");
		return IRQ_NONE;
	}

	if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
				| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
				| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD))
			!= 0) {
		/*
		 * due to an spi error we consider transfer as done,
		 * so mask all events until before next transfer start
		 * and stop the possibly running dma immediately
		 */
		au1550_spi_mask_ack_all(hw);
		au1xxx_dbdma_stop(hw->dma_rx_ch);
		au1xxx_dbdma_stop(hw->dma_tx_ch);

		/* get number of transferred bytes */
		hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch);
		hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch);

		au1xxx_dbdma_reset(hw->dma_rx_ch);
		au1xxx_dbdma_reset(hw->dma_tx_ch);
		au1550_spi_reset_fifos(hw);

		/* note: exact-equality test — only a lone RO event gets the
		 * dedicated message, combined events fall through to generic */
		if (evnt == PSC_SPIEVNT_RO)
			dev_err(hw->dev,
				"dma transfer: receive FIFO overflow!\n");
		else
			dev_err(hw->dev,
				"dma transfer: unexpected SPI error "
				"(event=0x%x stat=0x%x)!\n", evnt, stat);

		complete(&hw->master_done);
		return IRQ_HANDLED;
	}

	if ((evnt & PSC_SPIEVNT_MD) != 0) {
		/* transfer completed successfully */
		au1550_spi_mask_ack_all(hw);
		hw->rx_count = hw->len;
		hw->tx_count = hw->len;
		complete(&hw->master_done);
	}
	return IRQ_HANDLED;
}

/* routines to handle different word sizes in pio mode */
#define AU1550_SPI_RX_WORD(size, mask)					\
static void au1550_spi_rx_word_##size(struct au1550_spi *hw)		\
{									\
	u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask);		\
	wmb(); /* drain writebuffer */					\
	if								\
(hw->rx) {								\
		*(u##size *)hw->rx = (u##size)fifoword;			\
		hw->rx += (size) / 8;					\
	}								\
	hw->rx_count += (size) / 8;					\
}

#define AU1550_SPI_TX_WORD(size, mask)					\
static void au1550_spi_tx_word_##size(struct au1550_spi *hw)		\
{									\
	u32 fifoword = 0;						\
	if (hw->tx) {							\
		fifoword = *(u##size *)hw->tx & (u32)(mask);		\
		hw->tx += (size) / 8;					\
	}								\
	hw->tx_count += (size) / 8;					\
	if (hw->tx_count >= hw->len)					\
		fifoword |= PSC_SPITXRX_LC;	/* flag last word */	\
	hw->regs->psc_spitxrx = fifoword;				\
	wmb(); /* drain writebuffer */					\
}

/* the "32" variants deliberately use a 24-bit mask: the controller's
 * maximum word length is 24 bits (see the baudcfg/handler comments) */
AU1550_SPI_RX_WORD(8, 0xff)
AU1550_SPI_RX_WORD(16, 0xffff)
AU1550_SPI_RX_WORD(32, 0xffffff)
AU1550_SPI_TX_WORD(8, 0xff)
AU1550_SPI_TX_WORD(16, 0xffff)
AU1550_SPI_TX_WORD(32, 0xffffff)

/*
 * run one spi_transfer in pio mode: prefill the tx fifo, enable event
 * interrupts and let the irq callback shuffle the remaining words;
 * blocks until master_done is signalled
 */
static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
	u32 stat, mask;
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);

	hw->tx = t->tx_buf;
	hw->rx = t->rx_buf;
	hw->len = t->len;
	hw->tx_count = 0;
	hw->rx_count = 0;

	/* by default enable nearly all events after filling tx fifo */
	mask = PSC_SPIMSK_SD;

	/* fill the transmit FIFO */
	while (hw->tx_count < hw->len) {

		hw->tx_word(hw);

		if (hw->tx_count >= hw->len) {
			/* mask tx fifo request interrupt as we are done */
			mask |= PSC_SPIMSK_TR;
		}

		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */
		if (stat & PSC_SPISTAT_TF)
			break;
	}

	/* enable event interrupts */
	hw->regs->psc_spimsk = mask;
	wmb(); /* drain writebuffer */

	/* start the transfer */
	hw->regs->psc_spipcr = PSC_SPIPCR_MS;
	wmb(); /* drain writebuffer */

	wait_for_completion(&hw->master_done);

	return hw->rx_count < hw->tx_count ?
hw->rx_count : hw->tx_count;
}

/*
 * irq handler used for pio transfers: drains the rx fifo, refills the tx
 * fifo, and completes master_done on error or when all words were received
 */
static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
{
	int busy;
	u32 stat, evnt;

	stat = hw->regs->psc_spistat;
	evnt = hw->regs->psc_spievent;
	wmb(); /* drain writebuffer */
	if ((stat & PSC_SPISTAT_DI) == 0) {
		dev_err(hw->dev, "Unexpected IRQ!\n");
		return IRQ_NONE;
	}

	if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
				| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
				| PSC_SPIEVNT_SD))
			!= 0) {
		/*
		 * due to an error we consider transfer as done,
		 * so mask all events until before next transfer start
		 * (TU — tx underflow — is deliberately not in this set;
		 * it is recovered from below)
		 */
		au1550_spi_mask_ack_all(hw);
		au1550_spi_reset_fifos(hw);
		dev_err(hw->dev,
			"pio transfer: unexpected SPI error "
			"(event=0x%x stat=0x%x)!\n", evnt, stat);
		complete(&hw->master_done);
		return IRQ_HANDLED;
	}

	/*
	 * while there is something to read from rx fifo
	 * or there is a space to write to tx fifo:
	 */
	do {
		busy = 0;
		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */

		/*
		 * Take care to not let the Rx FIFO overflow.
		 *
		 * We only write a byte if we have read one at least. Initially,
		 * the write fifo is full, so we should read from the read fifo
		 * first.
		 * In case we miss a word from the read fifo, we should get a
		 * RO event and should back out.
		 */
		if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
			hw->rx_word(hw);
			busy = 1;

			if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
				hw->tx_word(hw);
		}
	} while (busy);

	/* ack the fifo service events we just handled */
	hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
	wmb(); /* drain writebuffer */

	/*
	 * Restart the SPI transmission in case of a transmit underflow.
	 * This seems to work despite the notes in the Au1550 data book
	 * of Figure 8-4 with flowchart for SPI master operation:
	 *
	 * """Note 1: An XFR Error Interrupt occurs, unless masked,
	 * for any of the following events: Tx FIFO Underflow,
	 * Rx FIFO Overflow, or Multiple-master Error
	 * Note 2: In case of a Tx Underflow Error, all zeroes are
	 * transmitted."""
	 *
	 * By simply restarting the spi transfer on Tx Underflow Error,
	 * we assume that spi transfer was paused instead of zeroes
	 * transmittion mentioned in the Note 2 of Au1550 data book.
	 */
	if (evnt & PSC_SPIEVNT_TU) {
		hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
		wmb(); /* drain writebuffer */
		hw->regs->psc_spipcr = PSC_SPIPCR_MS;
		wmb(); /* drain writebuffer */
	}

	if (hw->rx_count >= hw->len) {
		/* transfer completed successfully */
		au1550_spi_mask_ack_all(hw);
		complete(&hw->master_done);
	}
	return IRQ_HANDLED;
}

/* spi_bitbang txrx_bufs hook: dispatch to the dma or pio implementation */
static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);

	return hw->txrx_bufs(spi, t);
}

/* top-level irq handler: dispatch to the dma or pio callback */
static irqreturn_t au1550_spi_irq(int irq, void *dev)
{
	struct au1550_spi *hw = dev;

	return hw->irq_callback(hw);
}

/*
 * select transfer/irq handlers for the given word size: dma only for
 * <= 8 bit words (and only when dma is enabled), pio otherwise
 */
static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
{
	if (bpw <= 8) {
		if (hw->usedma) {
			hw->txrx_bufs = &au1550_spi_dma_txrxb;
			hw->irq_callback = &au1550_spi_dma_irq_callback;
		} else {
			hw->rx_word = &au1550_spi_rx_word_8;
			hw->tx_word = &au1550_spi_tx_word_8;
			hw->txrx_bufs = &au1550_spi_pio_txrxb;
			hw->irq_callback = &au1550_spi_pio_irq_callback;
		}
	} else if (bpw <= 16) {
		hw->rx_word = &au1550_spi_rx_word_16;
		hw->tx_word = &au1550_spi_tx_word_16;
		hw->txrx_bufs = &au1550_spi_pio_txrxb;
		hw->irq_callback = &au1550_spi_pio_irq_callback;
	} else {
		hw->rx_word =
&au1550_spi_rx_word_32;
		hw->tx_word = &au1550_spi_tx_word_32;
		hw->txrx_bufs = &au1550_spi_pio_txrxb;
		hw->irq_callback = &au1550_spi_pio_irq_callback;
	}
}

/*
 * bring the PSC out of reset, select SPI mode and program a safe initial
 * configuration (8-bit words, fifo thresholds, slowest-valid clock)
 */
static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
{
	u32 stat, cfg;

	/* set up the PSC for SPI mode */
	hw->regs->psc_ctrl = PSC_CTRL_DISABLE;
	wmb(); /* drain writebuffer */
	hw->regs->psc_sel = PSC_SEL_PS_SPIMODE;
	wmb(); /* drain writebuffer */

	hw->regs->psc_spicfg = 0;
	wmb(); /* drain writebuffer */

	hw->regs->psc_ctrl = PSC_CTRL_ENABLE;
	wmb(); /* drain writebuffer */

	/* wait for the PSC to report itself ready */
	do {
		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */
	} while ((stat & PSC_SPISTAT_SR) == 0);

	cfg = hw->usedma ? 0 : PSC_SPICFG_DD_DISABLE;
	cfg |= PSC_SPICFG_SET_LEN(8);
	cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8;
	/* use minimal allowed brg and div values as initial setting: */
	cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0);

#ifdef AU1550_SPI_DEBUG_LOOPBACK
	cfg |= PSC_SPICFG_LB;
#endif

	hw->regs->psc_spicfg = cfg;
	wmb(); /* drain writebuffer */

	au1550_spi_mask_ack_all(hw);

	hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE;
	wmb(); /* drain writebuffer */

	/* wait until the spi device is enabled */
	do {
		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */
	} while ((stat & PSC_SPISTAT_DR) == 0);

	au1550_spi_reset_fifos(hw);
}

/*
 * platform probe: gather resources (irq, optional dma ids, mmio), set up
 * dbdma channels and the fallback rx buffer when dma is usable, compute
 * the valid clock range and register the bitbang master
 */
static int au1550_spi_probe(struct platform_device *pdev)
{
	struct au1550_spi *hw;
	struct spi_master *master;
	struct resource *r;
	int err = 0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "No memory for spi_master\n");
		err = -ENOMEM;
		goto err_nomem;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);

	hw = spi_master_get_devdata(master);

	hw->master = master;
	hw->pdata = dev_get_platdata(&pdev->dev);
	hw->dev = &pdev->dev;

	if (hw->pdata == NULL) {
		dev_err(&pdev->dev, "No platform data supplied\n");
		err = -ENOENT;
		goto err_no_pdata;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r) {
		dev_err(&pdev->dev, "no IRQ\n");
		err = -ENODEV;
		goto err_no_iores;
	}
	hw->irq = r->start;

	/* dma is used only when both dma ids are present, the module
	 * parameter allows it, the mem dbdma device was registered and
	 * the platform device carries a dma mask */
	hw->usedma = 0;
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r) {
		hw->dma_tx_id = r->start;
		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (r) {
			hw->dma_rx_id = r->start;
			if (usedma && ddma_memid) {
				if (pdev->dev.dma_mask == NULL)
					dev_warn(&pdev->dev, "no dma mask\n");
				else
					hw->usedma = 1;
			}
		}
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no mmio resource\n");
		err = -ENODEV;
		goto err_no_iores;
	}

	hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
					pdev->name);
	if (!hw->ioarea) {
		dev_err(&pdev->dev, "Cannot reserve iomem region\n");
		err = -ENXIO;
		goto err_no_iores;
	}

	hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot ioremap\n");
		err = -ENXIO;
		goto err_ioremap;
	}

	platform_set_drvdata(pdev, hw);

	init_completion(&hw->master_done);

	hw->bitbang.master = hw->master;
	hw->bitbang.setup_transfer = au1550_spi_setupxfer;
	hw->bitbang.chipselect = au1550_spi_chipsel;
	hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;

	if (hw->usedma) {
		/* tx channel: from the 8-bit mem device into the spi fifo */
		hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
			hw->dma_tx_id, NULL, (void *)hw);
		if (hw->dma_tx_ch == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate tx dma channel\n");
			err = -ENXIO;
			goto
err_no_txdma;
		}
		au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8);
		if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch,
			AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate tx dma descriptors\n");
			err = -ENXIO;
			goto err_no_txdma_descr;
		}

		/* rx channel: from the spi fifo into the 8-bit mem device */
		hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
			ddma_memid, NULL, (void *)hw);
		if (hw->dma_rx_ch == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate rx dma channel\n");
			err = -ENXIO;
			goto err_no_rxdma;
		}
		au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8);
		if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch,
			AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate rx dma descriptors\n");
			err = -ENXIO;
			goto err_no_rxdma_descr;
		}

		err = au1550_spi_dma_rxtmp_alloc(hw,
			AU1550_SPI_DMA_RXTMP_MINSIZE);
		if (err < 0) {
			dev_err(&pdev->dev,
				"Cannot allocate initial rx dma tmp buffer\n");
			goto err_dma_rxtmp_alloc;
		}
	}

	au1550_spi_bits_handlers_set(hw, 8);

	err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw);
	if (err) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		goto err_no_irq;
	}

	master->bus_num = pdev->id;
	master->num_chipselect = hw->pdata->num_chipselect;

	/*
	 * precompute valid range for spi freq - from au1550 datasheet:
	 *    psc_tempclk = psc_mainclk / (2 << DIV)
	 *    spiclk = psc_tempclk / (2 * (BRG + 1))
	 *    BRG valid range is 4..63
	 *    DIV valid range is 0..3
	 * round the min and max frequencies to values that would still
	 * produce valid brg and div
	 */
	{
		int min_div = (2 << 0) * (2 * (4 + 1));
		int max_div = (2 << 3) * (2 * (63 + 1));

		master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
		master->min_speed_hz =
				hw->pdata->mainclk_hz / (max_div + 1) + 1;
	}

	au1550_spi_setup_psc_as_spi(hw);

	err = spi_bitbang_start(&hw->bitbang);
	if (err) {
		dev_err(&pdev->dev,
"Failed to register SPI master\n"); 885 goto err_register; 886 } 887 888 dev_info(&pdev->dev, 889 "spi master registered: bus_num=%d num_chipselect=%d\n", 890 master->bus_num, master->num_chipselect); 891 892 return 0; 893 894 err_register: 895 free_irq(hw->irq, hw); 896 897 err_no_irq: 898 au1550_spi_dma_rxtmp_free(hw); 899 900 err_dma_rxtmp_alloc: 901 err_no_rxdma_descr: 902 if (hw->usedma) 903 au1xxx_dbdma_chan_free(hw->dma_rx_ch); 904 905 err_no_rxdma: 906 err_no_txdma_descr: 907 if (hw->usedma) 908 au1xxx_dbdma_chan_free(hw->dma_tx_ch); 909 910 err_no_txdma: 911 iounmap((void __iomem *)hw->regs); 912 913 err_ioremap: 914 release_mem_region(r->start, sizeof(psc_spi_t)); 915 916 err_no_iores: 917 err_no_pdata: 918 spi_master_put(hw->master); 919 920 err_nomem: 921 return err; 922 } 923 924 static int au1550_spi_remove(struct platform_device *pdev) 925 { 926 struct au1550_spi *hw = platform_get_drvdata(pdev); 927 928 dev_info(&pdev->dev, "spi master remove: bus_num=%d\n", 929 hw->master->bus_num); 930 931 spi_bitbang_stop(&hw->bitbang); 932 free_irq(hw->irq, hw); 933 iounmap((void __iomem *)hw->regs); 934 release_mem_region(hw->ioarea->start, sizeof(psc_spi_t)); 935 936 if (hw->usedma) { 937 au1550_spi_dma_rxtmp_free(hw); 938 au1xxx_dbdma_chan_free(hw->dma_rx_ch); 939 au1xxx_dbdma_chan_free(hw->dma_tx_ch); 940 } 941 942 spi_master_put(hw->master); 943 return 0; 944 } 945 946 /* work with hotplug and coldplug */ 947 MODULE_ALIAS("platform:au1550-spi"); 948 949 static struct platform_driver au1550_spi_drv = { 950 .probe = au1550_spi_probe, 951 .remove = au1550_spi_remove, 952 .driver = { 953 .name = "au1550-spi", 954 }, 955 }; 956 957 static int __init au1550_spi_init(void) 958 { 959 /* 960 * create memory device with 8 bits dev_devwidth 961 * needed for proper byte ordering to spi fifo 962 */ 963 switch (alchemy_get_cputype()) { 964 case ALCHEMY_CPU_AU1550: 965 case ALCHEMY_CPU_AU1200: 966 case ALCHEMY_CPU_AU1300: 967 break; 968 default: 969 return -ENODEV; 970 } 
971 972 if (usedma) { 973 ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev); 974 if (!ddma_memid) 975 printk(KERN_ERR "au1550-spi: cannot add memory" 976 "dbdma device\n"); 977 } 978 return platform_driver_register(&au1550_spi_drv); 979 } 980 module_init(au1550_spi_init); 981 982 static void __exit au1550_spi_exit(void) 983 { 984 if (usedma && ddma_memid) 985 au1xxx_ddma_del_device(ddma_memid); 986 platform_driver_unregister(&au1550_spi_drv); 987 } 988 module_exit(au1550_spi_exit); 989 990 MODULE_DESCRIPTION("Au1550 PSC SPI Driver"); 991 MODULE_AUTHOR("Jan Nikitenko <jan.nikitenko@gmail.com>"); 992 MODULE_LICENSE("GPL"); 993