// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	dws->txburst = min(max_burst, def_burst);
}
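/*
 * Note: the burst level is capped at half the FIFO depth, presumably so the
 * DMA request thresholds programmed in dw_spi_dma_setup() (DMARDLR =
 * rxburst - 1, DMATDLR = fifo_len - txburst) trigger while the other half of
 * the FIFO is still free to absorb traffic. If the DMA engine doesn't report
 * a maximum burst capability, a default of 16 entries (RX/TX_BURST_LEVEL) is
 * assumed.
 */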
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device of the DMA controller. Currently it can only be
	 * the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned long long ms;

	ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, xfer->effective_speed_hz);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
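/*
 * For illustration (hypothetical numbers): a 4096-byte transfer clocked at
 * 1 MHz takes 4096 * 8 / 1000000 s ~= 32.8 ms on the wire; do_div() above
 * truncates that to 32 ms and the timeout becomes 32 + 32 + 200 = 264 ms,
 * i.e. roughly twice the nominal transfer time plus a 200 ms margin for
 * DMA engine and scheduling latency.
 */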
static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the callback
 * for the Tx channel clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

static struct dma_async_tx_descriptor *
dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
					 xfer->tx_sg.sgl,
					 xfer->tx_sg.nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if it
	 * is, let's give it some reasonable time. The timeout calculation is
	 * based on the synchronous APB/SSI reference clock rate, the number
	 * of data entries left in the Rx FIFO, and the number of clock
	 * periods normally needed for a single APB read/write transaction
	 * without the PREADY signal utilized (which is true for the DW APB
	 * SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the callback
 * for the Rx channel clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

static struct dma_async_tx_descriptor *
dw_spi_dma_prepare_rx(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
					 xfer->rx_sg.sgl,
					 xfer->rx_sg.nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}
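/*
 * Program the controller for a DMA-driven transfer: the Rx/Tx DMA request
 * thresholds (DMARDLR/DMATDLR) are matched to the burst levels chosen in
 * dw_spi_dma_maxburst_init(), the per-direction DMA handshake is enabled in
 * DMACR, and only the FIFO error interrupts are unmasked so that
 * dw_spi_dma_transfer_handler() fires solely on an Rx/Tx over/underrun.
 */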
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr = 0, dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
	dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - dws->txburst);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	if (xfer->tx_buf)
		imr |= SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;
	int ret;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * The Rx channel must be started before Tx: in SPI, data is clocked
	 * in while it is being clocked out, so the Rx side has to be ready
	 * from the very first bit.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	ret = dw_spi_dma_wait(dws, xfer);
	if (ret)
		return ret;

	if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}
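/*
 * Two sets of DMA ops are provided by this file: a Medfield-specific set
 * that grabs the on-chip DW DMA controller by its PCI ID, and a generic set
 * that relies on the platform (DT/ACPI) describing "tx" and "rx" slave
 * channels. Apart from channel acquisition the two behave identically.
 */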
static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init = dw_spi_dma_init_mfld,
	.dma_exit = dw_spi_dma_exit,
	.dma_setup = dw_spi_dma_setup,
	.can_dma = dw_spi_can_dma,
	.dma_transfer = dw_spi_dma_transfer,
	.dma_stop = dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init = dw_spi_dma_init_generic,
	.dma_exit = dw_spi_dma_exit,
	.dma_setup = dw_spi_dma_setup,
	.can_dma = dw_spi_can_dma,
	.dma_transfer = dw_spi_dma_transfer,
	.dma_stop = dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);
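/*
 * Usage sketch (not part of this file, assuming the usual DW SSI driver
 * layout): a glue driver picks one of the ops sets before registering the
 * controller, e.g.
 *
 *	dw_spi_dma_setup_generic(dws);	// or dw_spi_dma_setup_mfld()
 *	ret = dw_spi_add_host(dev, dws);
 *
 * dw_spi_add_host() is then expected to invoke dws->dma_ops->dma_init() and
 * wire can_dma()/dma_transfer() into the SPI core transfer path.
 */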