1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * 8250_dma.c - DMA Engine API support for 8250.c
4 *
5 * Copyright (C) 2013 Intel Corporation
6 */
7 #include <linux/tty.h>
8 #include <linux/tty_flip.h>
9 #include <linux/serial_reg.h>
10 #include <linux/dma-mapping.h>
11
12 #include "8250.h"
13
/*
 * DMA TX completion callback (runs in tasklet/callback context).
 *
 * Accounts for the bytes just transmitted, wakes up writers when the circ
 * buffer drains below the wakeup threshold, and immediately tries to queue
 * the next TX DMA transfer. If no new DMA could be started, falls back to
 * interrupt-driven TX by enabling THRI.
 */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	/* Hand the streaming TX mapping back to the CPU before touching xmit. */
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	/* Consume the bytes the completed transfer sent and update icount. */
	uart_xmit_advance(&p->port, dma->tx_size);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/*
	 * Try to chain the next transfer. If that failed (ret != 0) or there
	 * was nothing left to send (tx_running stayed 0), re-enable the THR
	 * interrupt so the PIO path takes over / completes the drain.
	 */
	ret = serial8250_tx_dma(p);
	if (ret || !dma->tx_running)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}
40
/*
 * Push the data received by a finished (or paused) RX DMA transfer into the
 * tty layer. Caller must hold the port lock.
 */
static void __dma_rx_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	enum dma_status dma_status;
	int count;

	/*
	 * New DMA Rx can be started during the completion handler before it
	 * could acquire port's lock and it might still be ongoing. Don't do
	 * anything in such case.
	 */
	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (dma_status == DMA_IN_PROGRESS)
		return;

	/* residue = bytes the DMA engine did NOT fill; the rest is payload. */
	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;
	dma->rx_running = 0;

	tty_flip_buffer_push(tty_port);
}
66
/*
 * dmaengine RX completion callback: drain the finished transfer and, if
 * there is still data sitting in the receiver, kick off the next RX DMA.
 */
static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	unsigned long flags;

	spin_lock_irqsave(&p->port.lock, flags);
	if (dma->rx_running)
		__dma_rx_complete(p);

	/*
	 * Cannot be combined with the previous check because __dma_rx_complete()
	 * changes dma->rx_running.
	 */
	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
		p->dma->rx_dma(p);
	spin_unlock_irqrestore(&p->port.lock, flags);
}
85
/*
 * Start a TX DMA transfer for the pending bytes in the circ buffer.
 *
 * Called with the port lock held (from the IRQ path or from
 * __dma_tx_complete()). Sends at most up to the end of the circular buffer
 * in one transfer; the completion callback chains the wrap-around part.
 *
 * Returns 0 on success or if there was nothing to do, negative errno if a
 * descriptor could not be prepared (tx_err is set so callers fall back to
 * PIO via THRI).
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor *desc;
	struct uart_port *up = &p->port;
	int ret;

	if (dma->tx_running) {
		/*
		 * A transfer is in flight; a high-priority x_char (XON/XOFF)
		 * must still go out now, so briefly pause DMA around the
		 * direct THR write.
		 */
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		return 0;
	}

	/* One linear chunk only: stop at the buffer end, no wrap handling here. */
	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	/* Driver hook for hardware that needs per-transfer TX setup. */
	serial8250_do_prepare_tx_dma(p);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Flush CPU writes to the xmit buffer before the device reads it. */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	/* DMA owns TX now; the THR-empty interrupt would only get in the way. */
	serial8250_clear_THRI(p);
	dma->tx_err = 0;

	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
141
serial8250_tx_dma_flush(struct uart_8250_port * p)142 void serial8250_tx_dma_flush(struct uart_8250_port *p)
143 {
144 struct uart_8250_dma *dma = p->dma;
145
146 if (!dma->tx_running)
147 return;
148
149 /*
150 * kfifo_reset() has been called by the serial core, avoid
151 * advancing and underflowing in __dma_tx_complete().
152 */
153 dma->tx_size = 0;
154
155 dmaengine_terminate_async(dma->rxchan);
156 }
157
serial8250_rx_dma(struct uart_8250_port * p)158 int serial8250_rx_dma(struct uart_8250_port *p)
159 {
160 struct uart_8250_dma *dma = p->dma;
161 struct dma_async_tx_descriptor *desc;
162
163 if (dma->rx_running)
164 return 0;
165
166 serial8250_do_prepare_rx_dma(p);
167
168 desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
169 dma->rx_size, DMA_DEV_TO_MEM,
170 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
171 if (!desc)
172 return -EBUSY;
173
174 dma->rx_running = 1;
175 desc->callback = dma_rx_complete;
176 desc->callback_param = p;
177
178 dma->rx_cookie = dmaengine_submit(desc);
179
180 dma_async_issue_pending(dma->rxchan);
181
182 return 0;
183 }
184
serial8250_rx_dma_flush(struct uart_8250_port * p)185 void serial8250_rx_dma_flush(struct uart_8250_port *p)
186 {
187 struct uart_8250_dma *dma = p->dma;
188
189 if (dma->rx_running) {
190 dmaengine_pause(dma->rxchan);
191 __dma_rx_complete(p);
192 dmaengine_terminate_async(dma->rxchan);
193 }
194 }
195 EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
196
/*
 * Acquire and configure the RX and TX DMA channels and buffers for a port.
 *
 * RX uses a coherent bounce buffer (default one page); TX streams straight
 * out of the port's circular xmit buffer via dma_map_single(). Capability
 * checks reject dmaengine drivers that cannot support the 8250 model
 * (RX needs pause/terminate and sub-descriptor residue reporting, TX needs
 * terminate).
 *
 * Returns 0 on success, negative errno on failure; on failure all
 * partially-acquired resources are released via the goto cleanup chain.
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	/* Drivers may override the FIFO register base; default to mapbase. */
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer: stream-map the port's circular xmit buffer directly. */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
291 EXPORT_SYMBOL_GPL(serial8250_request_dma);
292
/*
 * Undo serial8250_request_dma(): stop both channels and free their buffers.
 *
 * Ordering matters: each channel is synchronously terminated before its
 * buffer is freed/unmapped and the channel released, so no transfer can
 * touch memory that is going away.
 */
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	/* DMA may never have been set up for this port. */
	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
317 EXPORT_SYMBOL_GPL(serial8250_release_dma);
318