// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

/*
 * dmaengine completion callback for a finished TX transfer.
 *
 * Syncs the TX buffer ownership back to the CPU, takes the port lock,
 * accounts the bytes just sent (uart_xmit_advance), wakes up writers
 * once the circular buffer drops below WAKEUP_CHARS, and tries to kick
 * off the next TX DMA immediately.  If that fails or there is nothing
 * more to DMA, THRI is re-enabled so any remaining/late data is pushed
 * out via the normal THR-empty interrupt path instead.
 */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	uart_xmit_advance(&p->port, dma->tx_size);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/* Chain the next transfer; fall back to PIO (THRI) if none starts. */
	ret = serial8250_tx_dma(p);
	if (ret || !dma->tx_running)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}

/*
 * Push the data received by a (possibly partial) RX DMA transfer up to
 * the tty layer.  The residue reported by the dmaengine driver tells how
 * much of rx_size was NOT filled, so count = rx_size - residue.
 *
 * Clears rx_running first so a concurrent completion path sees the
 * transfer as done.  NOTE(review): callers appear to hold the port lock
 * (dma_rx_complete takes it; serial8250_rx_dma_flush's callers are not
 * visible here) — confirm against the rest of the driver.
 */
static void __dma_rx_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	int count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}

/*
 * dmaengine completion callback for RX.
 *
 * Only flushes the buffer if the transfer is still marked running (it
 * may already have been completed by serial8250_rx_dma_flush()), then
 * rearms RX DMA if the UART still has data pending (LSR.DR set).
 */
static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	unsigned long flags;

	spin_lock_irqsave(&p->port.lock, flags);
	if (dma->rx_running)
		__dma_rx_complete(p);

	/*
	 * Cannot be combined with the previous check because
	 * __dma_rx_complete() changes dma->rx_running.
	 */
	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
		p->dma->rx_dma(p);
	spin_unlock_irqrestore(&p->port.lock, flags);
}

/*
 * Start (or continue) DMA transmission of the circular xmit buffer.
 *
 * Called with the port lock held, both from the IRQ path and from
 * __dma_tx_complete().  If a transfer is already in flight, only an
 * x_char (flow-control character) is squeezed out by pausing the
 * channel around a PIO write.  Otherwise one contiguous chunk — up to
 * the buffer wrap point (CIRC_CNT_TO_END) — is mapped into a slave
 * transfer and submitted.
 *
 * Returns 0 on success or when there is nothing to do; a negative
 * errno (-EBUSY) if a descriptor could not be prepared, in which case
 * tx_err is set so callers can fall back to PIO.
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor *desc;
	struct uart_port *up = &p->port;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			/* Pause DMA so the x_char can go out via PIO. */
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		return 0;
	}

	/* Transfer only up to the wrap point; the rest goes next round. */
	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	serial8250_do_prepare_tx_dma(p);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Hand the streaming-mapped xmit buffer over to the device. */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	serial8250_clear_THRI(p);
	dma->tx_err = 0;

	return 0;
err:
	dma->tx_err = 1;
	return ret;
}

/*
 * Arm an RX DMA transfer into the coherent rx_buf.
 *
 * No-op (returns 0) if one is already running.  Returns -EBUSY when the
 * dmaengine driver cannot provide a descriptor, so the caller can stay
 * on the PIO RX path.
 */
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;

	if (dma->rx_running)
		return 0;

	serial8250_do_prepare_rx_dma(p);

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}

/*
 * Abort an in-flight RX DMA transfer, delivering whatever has already
 * arrived.  Pausing first makes the residue reported to
 * __dma_rx_complete() stable; the channel is then terminated
 * asynchronously.  This ordering (pause -> flush -> terminate) matters:
 * terminating before reading the residue would lose the byte count.
 */
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

/*
 * Acquire and configure the RX and TX DMA channels and buffers.
 *
 * Slave addresses default to the port's mapbase (offset by the RX/TX
 * register) unless the platform supplied explicit rx/tx_dma_addr.  RX
 * requires pause/terminate support and byte-accurate residue reporting
 * (descriptor-granularity residue is rejected, since __dma_rx_complete
 * relies on the residue for the byte count); TX only needs terminate.
 *
 * RX uses a coherent buffer (device writes, CPU reads); TX streams the
 * existing circular xmit buffer via dma_map_single.
 *
 * Returns 0 on success or a negative errno, releasing any channel
 * already acquired via the goto-cleanup chain on failure.
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

/*
 * Undo serial8250_request_dma(): terminate both channels synchronously,
 * free the coherent RX buffer, unmap the streaming TX mapping, and
 * release the channels.  Safe to call when DMA was never set up
 * (p->dma NULL).  Channel pointers are cleared so stale references
 * cannot be used afterwards.
 */
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);