// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define DW_SPI_RX_BUSY		0
#define DW_SPI_RX_BURST_LEVEL	16
#define DW_SPI_TX_BUSY		1
#define DW_SPI_TX_BURST_LEVEL	16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}
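
/*
 * Pick the Rx/Tx DMA burst lengths and program the matching FIFO thresholds.
 * A worked example (illustrative numbers, assuming a 32-entry FIFO and the
 * usual DW APB SSI watermark semantics): def_burst = fifo_len / 2 = 16, so
 * with no tighter DMA-engine limit rxburst = txburst = 16. DMARDLR =
 * rxburst - 1 = 15 raises the Rx DMA request once at least 16 entries sit in
 * the Rx FIFO, while DMATDLR = 16 raises the Tx DMA request when the Tx FIFO
 * level drops to 16 entries or fewer.
 */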
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_TX_BURST_LEVEL;

	/*
	 * Having the Rx DMA channel serviced with a higher priority than the
	 * Tx DMA channel might not be enough to provide a well-balanced
	 * DMA-based SPI transfer interface. There may still be moments when
	 * the Tx DMA channel is occasionally handled faster than the Rx DMA
	 * channel. That in turn will eventually cause an SPI Rx FIFO overflow
	 * if the SPI bus speed is high enough to fill the Rx FIFO before the
	 * Rx DMA channel drains it. To fix the problem the Tx DMA activity is
	 * intentionally slowed down by limiting the effective Tx FIFO depth
	 * to twice the Tx burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx = {0}, rx = {0};

	dma_get_slave_caps(dws->txchan, &tx);
	dma_get_slave_caps(dws->rxchan, &rx);

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;
}

static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	pci_dev_put(dma_dev);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	pci_dev_put(dma_dev);
	return -EBUSY;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	int ret;

	dws->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dws->rxchan)) {
		ret = PTR_ERR(dws->rxchan);
		dws->rxchan = NULL;
		goto err_exit;
	}

	dws->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dws->txchan)) {
		ret = PTR_ERR(dws->txchan);
		dws->txchan = NULL;
		goto free_rxchan;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return ret;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}

static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
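
/*
 * Wait for the DMA-completion callbacks, with a timeout derived from the
 * transfer length and the bus speed. For instance (illustrative numbers
 * only): a 4096-byte transfer at 1 Mbit/s carries 32768 bits, i.e. ~33 ms
 * on the wire; the formula below doubles that and adds 200 ms of scheduling
 * slack, so the wait gives up after roughly 265 ms.
 */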
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Tx channel
 * callback will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}
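
/*
 * A worked example of the per-retry delay computed below (illustrative
 * figures, assuming a 100 MHz APB/SSI reference clock): a single APB
 * transaction with no PREADY takes 4 clock periods, i.e. 40 ns, so with
 * 8 entries left in the Rx FIFO the DMA engine is given 8 * 40 = 320 ns
 * per retry to drain them.
 */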
static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if it
	 * is, let's give it some reasonable time. The timeout calculation is
	 * based on the synchronous APB/SSI reference clock rate and the
	 * number of data entries left in the Rx FIFO, multiplied by the
	 * number of clock periods normally needed for a single APB read/write
	 * transaction without the PREADY signal utilized (which is true for
	 * the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Rx channel
 * callback will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

	return 0;
}
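
/*
 * Prepare the controller for a DMA-driven transfer: configure the Tx DMA
 * channel (and the Rx one for full-duplex transfers), enable the matching
 * DMA handshake bits in DMACR, and unmask only the FIFO error interrupts.
 * Normal completion is signalled through the dmaengine callbacks, so the
 * unmasked interrupts merely catch Tx/Rx FIFO overflow and Rx FIFO
 * underflow conditions.
 */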
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = DW_SPI_DMACR_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= DW_SPI_DMACR_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = DW_SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
	dw_spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* Rx must be started before Tx due to the SPI full-duplex nature */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * If at least one of the requested DMA channels doesn't support hardware
 * accelerated SG list traversal, the DMA driver will most likely work around
 * that by performing IRQ-based SG list entry resubmission. That can cause a
 * problem if the DMA Tx channel is recharged and re-executed before the Rx
 * DMA channel. Due to the non-deterministic IRQ-handler execution latency,
 * the DMA Tx channel will start pushing data to the SPI bus before the Rx
 * DMA channel is even reinitialized with the next inbound SG list entry. By
 * doing so the DMA Tx channel will implicitly start filling the DW APB SSI
 * Rx FIFO, which, while the DMA Rx channel is being recharged and
 * re-executed, will eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one-by-one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have a different number of entries of different lengths (though
 * the total length should match), let's virtually split the SG lists into a
 * set of DMA transfers, each with a length equal to the minimum of the
 * overlapping SG-entry lengths. An ASCII sketch of the implemented algorithm
 * follows:
 *      xfer->len
 *    |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note: for this workaround to actually solve the denoted problem the DMA
 * engine driver should properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter to the maximum data block
 * size the DMA engine supports.
 */

static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0, len = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx due to the SPI full-duplex nature */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to be
		 * finished, since the SPI controller is kept enabled for the
		 * whole procedure this loop implements, so there is no risk
		 * of losing the data left in the Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute the normal DMA-based transfer (which submits the Rx and Tx
	 * SG lists directly to the DMA engine at once) if either full
	 * hardware accelerated SG list traversal is supported by both
	 * channels, or a Tx-only SPI transfer is requested, or both SG lists
	 * fit within the DMA engine max_sg_burst capability.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}
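
/*
 * Quiesce the DMA channels (presumably reached via the dma_ops->dma_stop
 * callback on the error/abort path): any channel still marked busy has its
 * in-flight descriptors terminated synchronously and its busy flag cleared.
 */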
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);