xref: /openbmc/linux/drivers/tty/serial/8250/8250_dma.c (revision ecfb9f40)
// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"
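
/*
 * DMA TX completion callback, invoked by the dmaengine driver outside of
 * the port lock.  Sync the transmit buffer back to the CPU, credit the
 * bytes that were sent, wake up writers and try to start the next
 * transfer.  If no new transfer was started (nothing left to send, TX
 * stopped, or descriptor preparation failed), re-enable THRI so the
 * interrupt-driven path takes over again.
 */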
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct circ_buf		*xmit = &p->port.state->xmit;
	unsigned long	flags;
	int		ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	uart_xmit_advance(&p->port, dma->tx_size);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret || !dma->tx_running)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}
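
/*
 * Push whatever the current RX DMA transfer has received so far into the
 * tty layer.  The residue reported by the dmaengine driver is the number
 * of bytes of the rx_size request still outstanding, so the difference is
 * what actually landed in rx_buf.
 */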
static void __dma_rx_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	int			count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}
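
/*
 * RX DMA completion callback.  After handing the received data to the tty
 * layer, restart reception through the rx_dma() hook under the port lock
 * if the UART still reports data ready (UART_LSR_DR), so characters that
 * arrived after the transfer completed are not left sitting in the FIFO.
 */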
static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	unsigned long flags;

	__dma_rx_complete(p);

	spin_lock_irqsave(&p->port.lock, flags);
	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
		p->dma->rx_dma(p);
	spin_unlock_irqrestore(&p->port.lock, flags);
}
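
/*
 * Start (or continue) transmit DMA for the port.  Callers are expected to
 * hold p->port.lock, as __dma_tx_complete() above does.  A pending x_char
 * is written directly to the TX register, pausing the channel briefly if
 * a transfer is in flight.  Each transfer covers the contiguous chunk up
 * to the end of the circular buffer; wrap-around is handled by the next
 * call from the completion callback.  Returns 0 on success or when there
 * is nothing to do, or a negative error code (with tx_err set) if no
 * descriptor could be prepared.
 */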
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	struct uart_port		*up = &p->port;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	serial8250_do_prepare_tx_dma(p);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	serial8250_clear_THRI(p);
	dma->tx_err = 0;

	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
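
/*
 * Start a receive DMA transfer of dma->rx_size bytes into the coherent
 * rx_buf.  Does nothing if a transfer is already running.  Returns 0 on
 * success or -EBUSY if the dmaengine driver could not prepare a
 * descriptor.
 */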
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;

	if (dma->rx_running)
		return 0;

	serial8250_do_prepare_rx_dma(p);

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
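
/*
 * Flush a running RX DMA transfer: pause the channel, push the bytes
 * received so far to the tty layer, then terminate the transfer so a
 * fresh one can be scheduled.  Typically used when the core has to fall
 * back to PIO reception, e.g. on line errors or an RX timeout.
 */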
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
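
/*
 * Acquire and configure the RX and TX DMA channels for the port, allocate
 * the coherent RX bounce buffer and map the circular TX buffer for DMA.
 * The RX channel must support pause/terminate and residue reporting finer
 * than descriptor granularity; the TX channel must support terminate.  If
 * any requirement is not met, everything is released and an error is
 * returned, leaving the port to run in PIO mode.
 *
 * A minimal sketch of how a port driver is assumed to make use of this
 * (the foo_* names are hypothetical, for illustration only):
 *
 *	static struct uart_8250_dma foo_dma;	// filter fn/params or
 *						// "rx"/"tx" DT channel names
 *
 *	static int foo_setup_port(struct uart_8250_port *up)
 *	{
 *		up->dma = &foo_dma;
 *		// The 8250 core is then expected to call
 *		// serial8250_request_dma() from its startup path and
 *		// serial8250_release_dma() on shutdown.
 *		return 0;
 *	}
 */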
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
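
/*
 * Undo serial8250_request_dma(): stop any in-flight transfers, free the
 * RX bounce buffer, unmap the TX buffer and release both channels.  Safe
 * to call on a port that never had DMA set up (p->dma == NULL).
 */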
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);