xref: /openbmc/linux/drivers/spi/spi-dw-dma.c (revision 1dbab6b1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16
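/*
 * WAIT_RETRIES bounds the FIFO-drain polling loops in
 * dw_spi_dma_wait_tx_done()/dw_spi_dma_wait_rx_done(), RX_BUSY/TX_BUSY are
 * the bit numbers used in dws->dma_chan_busy, and the *_BURST_LEVEL values
 * are the fallback burst lengths used when the DMA engine doesn't report a
 * maximum burst capability.
 */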

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

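/*
 * Pick the burst lengths as the smaller of half the FIFO depth and whatever
 * the DMA engine reports it can do. For example (illustrative numbers): with
 * a 32-entry FIFO and a DMA engine advertising max_burst = 8, def_burst is
 * 16 and both rxburst and txburst end up as 8; if the capability query
 * fails, the 16-entry defaults above are used instead.
 */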
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	dws->txburst = min(max_burst, def_burst);
}

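/*
 * Medfield variant: the DMA controller is a PCI function (device ID 0x0827),
 * so the channels are requested with a filter that matches that device. The
 * .dst_id/.src_id values are the DW DMA handshake interface numbers wired to
 * the SPI Tx/Rx requests on that platform.
 */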
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device of the DMA controller; currently this can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

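/*
 * Generic variant: the Rx/Tx channels are taken from the DMA description of
 * the platform device (e.g. the "rx"/"tx" names in a DT "dma-names"
 * property).
 */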
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

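/*
 * This handler only runs for the error conditions unmasked in
 * dw_spi_dma_setup() (Tx FIFO overflow, Rx FIFO underflow/overflow): normal
 * completion is signalled by the DMA callbacks, so any SPI interrupt here
 * means the FIFOs got out of sync and the transfer is failed with -EIO.
 */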
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

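/* Only resort to DMA when the transfer won't fit entirely in the SPI FIFO. */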
static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

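/*
 * Wait for the DMA completion with a timeout of twice the nominal transfer
 * time plus a 200 ms margin. For instance (illustrative numbers), a
 * 4096-byte transfer at 1 MHz takes 4096 * 8 / 1000000 s, i.e. about 32 ms,
 * so the timeout used here would be 2 * 32 + 200 = 264 ms.
 */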
static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned long long ms;

	ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, xfer->effective_speed_hz);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

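/*
 * The DMA engine signalling Tx completion only means the data has been
 * pushed into the Tx FIFO; the controller may still be shifting it out on
 * the wire. Poll the Tx FIFO empty flag for up to WAIT_RETRIES delays, each
 * sized to the number of SCLK cycles needed to drain the entries left in
 * the FIFO.
 */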
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * The TX_BUSY bit in dws->dma_chan_busy is set before the DMA transfer
 * starts; the Tx channel callback clears it. Whichever channel finishes
 * last disables the DMA requests and signals completion.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

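/*
 * Memory-side accesses are performed as 32-bit reads, while the device-side
 * (FIFO) access width follows the transfer word size (1 or 2 bytes).
 */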
static struct dma_async_tx_descriptor *
dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if it
	 * is, let's give it some reasonable time. The timeout calculation is
	 * based on the synchronous APB/SSI reference clock rate, the number
	 * of data entries left in the Rx FIFO, and the number of clock
	 * periods normally needed for a single APB read/write transaction
	 * without the PREADY signal utilized (which is true for the DW APB
	 * SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * The RX_BUSY bit in dws->dma_chan_busy is set before the DMA transfer
 * starts; the Rx channel callback clears it. Whichever channel finishes
 * last disables the DMA requests and signals completion.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr = 0, dma_ctrl = 0;

	/*
	 * Having the Rx DMA channel serviced with higher priority than the Tx
	 * DMA channel might not be enough to provide a well balanced DMA-based
	 * SPI transfer interface. There might still be moments when the Tx DMA
	 * channel is occasionally handled faster than the Rx DMA channel.
	 * That in turn will eventually cause an SPI Rx FIFO overflow if the
	 * SPI bus speed is high enough to fill the SPI Rx FIFO before it's
	 * cleared by the Rx DMA channel. In order to fix the problem the Tx
	 * DMA activity is intentionally slowed down by limiting the SPI Tx
	 * FIFO depth to twice the Tx burst length calculated earlier by the
	 * dw_spi_dma_maxburst_init() method.
	 */
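	/*
	 * For instance (illustrative numbers): with rxburst = txburst = 8,
	 * DMARDLR = 7 raises the Rx DMA request as soon as 8 entries sit in
	 * the Rx FIFO, while DMATDLR = 8 raises the Tx DMA request only once
	 * the Tx FIFO has drained to 8 entries or fewer, which keeps its
	 * occupancy to roughly two burst lengths.
	 */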
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	if (xfer->tx_buf)
		imr |= SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

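/*
 * Submit the Rx descriptor first and the Tx one second, wait for the DMA
 * completion (or an error reported by the interrupt handler), and then make
 * sure both FIFOs have actually drained before returning.
 */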
static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;
	int ret;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * Rx must be started before Tx: the controller starts clocking as
	 * soon as Tx data hits the FIFO, and Rx data arrives right away.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	ret = dw_spi_dma_wait(dws, xfer);
	if (ret)
		return ret;

	if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

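/*
 * Called on transfer abort/error handling: terminate whichever channels are
 * still marked busy and disable the controller's DMA requests.
 */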
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

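/*
 * These callbacks are invoked by the DW SPI core: dma_init() at host
 * registration, dma_setup()/dma_transfer() per transfer, dma_stop() on error
 * handling and dma_exit() on removal. A glue driver opts in by calling
 * dw_spi_dma_setup_mfld() or dw_spi_dma_setup_generic() before registering
 * the host.
 */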
static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);
493