xref: /openbmc/linux/drivers/tty/serial/stm32-usart.c (revision 7b0364ea)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Maxime Coquelin 2015
4  * Copyright (C) STMicroelectronics SA 2017
5  * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
6  *	     Gerald Baeza <gerald.baeza@foss.st.com>
7  *	     Erwan Le Ray <erwan.leray@foss.st.com>
8  *
9  * Inspired by st-asc.c from STMicroelectronics (c)
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/console.h>
14 #include <linux/delay.h>
15 #include <linux/dma-direction.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_platform.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/serial_core.h>
29 #include <linux/serial.h>
30 #include <linux/spinlock.h>
31 #include <linux/sysrq.h>
32 #include <linux/tty_flip.h>
33 #include <linux/tty.h>
34 
35 #include "serial_mctrl_gpio.h"
36 #include "stm32-usart.h"
37 
38 static void stm32_usart_stop_tx(struct uart_port *port);
39 static void stm32_usart_transmit_chars(struct uart_port *port);
40 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
41 
static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	/* Recover the driver-private stm32_port embedding this uart_port. */
	return container_of(port, struct stm32_port, port);
}
46 
47 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
48 {
49 	u32 val;
50 
51 	val = readl_relaxed(port->membase + reg);
52 	val |= bits;
53 	writel_relaxed(val, port->membase + reg);
54 }
55 
56 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
57 {
58 	u32 val;
59 
60 	val = readl_relaxed(port->membase + reg);
61 	val &= ~bits;
62 	writel_relaxed(val, port->membase + reg);
63 }
64 
65 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
66 					 u32 delay_DDE, u32 baud)
67 {
68 	u32 rs485_deat_dedt;
69 	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
70 	bool over8;
71 
72 	*cr3 |= USART_CR3_DEM;
73 	over8 = *cr1 & USART_CR1_OVER8;
74 
75 	if (over8)
76 		rs485_deat_dedt = delay_ADE * baud * 8;
77 	else
78 		rs485_deat_dedt = delay_ADE * baud * 16;
79 
80 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
81 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
82 			  rs485_deat_dedt_max : rs485_deat_dedt;
83 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
84 			   USART_CR1_DEAT_MASK;
85 	*cr1 |= rs485_deat_dedt;
86 
87 	if (over8)
88 		rs485_deat_dedt = delay_DDE * baud * 8;
89 	else
90 		rs485_deat_dedt = delay_DDE * baud * 16;
91 
92 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
93 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
94 			  rs485_deat_dedt_max : rs485_deat_dedt;
95 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
96 			   USART_CR1_DEDT_MASK;
97 	*cr1 |= rs485_deat_dedt;
98 }
99 
/* Apply an RS485 configuration requested through the TIOCSRS485 path. */
static int stm32_usart_config_rs485(struct uart_port *port,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	/* Disable the UART while CR1/CR3 are being reprogrammed. */
	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	/* The receiver stays active during transmission on this hardware. */
	rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		/* Recover the current baud rate from BRR to size DEAT/DEDT. */
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		/* In OVER8 mode BRR[3:0] is stored shifted right by one. */
		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		/* DEP selects the polarity of the driver-enable signal. */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
			cr3 &= ~USART_CR3_DEP;
		else
			cr3 |= USART_CR3_DEP;

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		/* RS485 disabled: clear driver-enable mode and timings. */
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Re-enable the UART with the new configuration. */
	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	return 0;
}
148 
149 static int stm32_usart_init_rs485(struct uart_port *port,
150 				  struct platform_device *pdev)
151 {
152 	struct serial_rs485 *rs485conf = &port->rs485;
153 
154 	rs485conf->flags = 0;
155 	rs485conf->delay_rts_before_send = 0;
156 	rs485conf->delay_rts_after_send = 0;
157 
158 	if (!pdev->dev.of_node)
159 		return -ENODEV;
160 
161 	return uart_get_rs485_mode(port);
162 }
163 
164 static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
165 {
166 	struct stm32_port *stm32_port = to_stm32_port(port);
167 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
168 
169 	if (!stm32_port->rx_ch)
170 		return false;
171 
172 	return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
173 }
174 
175 /* Return true when data is pending (in pio mode), and false when no data is pending. */
176 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
177 {
178 	struct stm32_port *stm32_port = to_stm32_port(port);
179 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
180 
181 	*sr = readl_relaxed(port->membase + ofs->isr);
182 	/* Get pending characters in RDR or FIFO */
183 	if (*sr & USART_SR_RXNE) {
184 		/* Get all pending characters from the RDR or the FIFO when using interrupts */
185 		if (!stm32_usart_rx_dma_enabled(port))
186 			return true;
187 
188 		/* Handle only RX data errors when using DMA */
189 		if (*sr & USART_SR_ERR_MASK)
190 			return true;
191 	}
192 
193 	return false;
194 }
195 
196 static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
197 {
198 	struct stm32_port *stm32_port = to_stm32_port(port);
199 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
200 	unsigned long c;
201 
202 	c = readl_relaxed(port->membase + ofs->rdr);
203 	/* Apply RDR data mask */
204 	c &= stm32_port->rdr_mask;
205 
206 	return c;
207 }
208 
/*
 * Drain pending RX characters in PIO mode, pushing them (with per-character
 * error flags) into the tty layer. Returns the number of characters read.
 * Caller holds the port lock.
 */
static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	unsigned int size = 0;
	u32 sr;
	char flag;

	while (stm32_usart_pending_rx_pio(port, &sr)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits has to be cleared before reading the RDR:
		 * In FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignement between RDR and SR,
		 * and clear status bits of the next rx data.
		 *
		 * Clear errors flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char_pio(port);
		port->icount.rx++;
		size++;
		/* Account the error in the matching icount counter. */
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			/* Only report the errors userspace asked for. */
			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		/* Swallow the character if it completes a sysrq sequence. */
		if (uart_prepare_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	return size;
}
275 
276 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
277 {
278 	struct stm32_port *stm32_port = to_stm32_port(port);
279 	struct tty_port *ttyport = &stm32_port->port.state->port;
280 	unsigned char *dma_start;
281 	int dma_count, i;
282 
283 	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
284 
285 	/*
286 	 * Apply rdr_mask on buffer in order to mask parity bit.
287 	 * This loop is useless in cs8 mode because DMA copies only
288 	 * 8 bits and already ignores parity bit.
289 	 */
290 	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
291 		for (i = 0; i < dma_size; i++)
292 			*(dma_start + i) &= stm32_port->rdr_mask;
293 
294 	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
295 	port->icount.rx += dma_count;
296 	if (dma_count != dma_size)
297 		port->icount.buf_overrun++;
298 	stm32_port->last_res -= dma_count;
299 	if (stm32_port->last_res == 0)
300 		stm32_port->last_res = RX_BUF_L;
301 }
302 
303 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
304 {
305 	struct stm32_port *stm32_port = to_stm32_port(port);
306 	unsigned int dma_size, size = 0;
307 
308 	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
309 	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
310 		/* Conditional first part: from last_res to end of DMA buffer */
311 		dma_size = stm32_port->last_res;
312 		stm32_usart_push_buffer_dma(port, dma_size);
313 		size = dma_size;
314 	}
315 
316 	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
317 	stm32_usart_push_buffer_dma(port, dma_size);
318 	size += dma_size;
319 
320 	return size;
321 }
322 
/*
 * Top-level RX path: drain characters via DMA when active (falling back to
 * PIO for error handling), or via PIO otherwise. @force_dma_flush empties
 * the DMA buffer even if the DMA request line is currently masked.
 * Returns the number of characters pushed to the tty layer.
 */
static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status rx_dma_status;
	u32 sr;
	unsigned int size = 0;

	if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    &stm32_port->rx_dma_state);
		if (rx_dma_status == DMA_IN_PROGRESS) {
			/* Empty DMA buffer */
			size = stm32_usart_receive_chars_dma(port);
			sr = readl_relaxed(port->membase + ofs->isr);
			if (sr & USART_SR_ERR_MASK) {
				/* Disable DMA request line */
				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

				/* Switch to PIO mode to handle the errors */
				size += stm32_usart_receive_chars_pio(port);

				/* Switch back to DMA mode */
				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
			}
		} else {
			/* Disable RX DMA */
			dmaengine_terminate_async(stm32_port->rx_ch);
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Fall back to interrupt mode */
			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
			size = stm32_usart_receive_chars_pio(port);
		}
	} else {
		size = stm32_usart_receive_chars_pio(port);
	}

	return size;
}
363 
static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
	/* Abort any in-flight TX DMA and mark the channel free for reuse. */
	dmaengine_terminate_async(stm32_port->tx_ch);
	stm32_port->tx_dma_busy = false;
}
369 
static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{
	/*
	 * We cannot rely on dmaengine_tx_status() here: it does not tell us
	 * whether the "dma complete" callback of the transaction has run
	 * yet. The driver-owned "tx_dma_busy" flag is used instead, to
	 * prevent two TX DMA transactions from being started concurrently.
	 */
	return stm32_port->tx_dma_busy;
}
381 
382 static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
383 {
384 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
385 
386 	return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT);
387 }
388 
/* DMA engine callback: the TX transaction finished. */
static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned long flags;

	/* Mask the TX DMA request line and release the channel/busy flag. */
	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32_usart_tx_dma_terminate(stm32port);

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}
404 
405 static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
406 {
407 	struct stm32_port *stm32_port = to_stm32_port(port);
408 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
409 
410 	/*
411 	 * Enables TX FIFO threashold irq when FIFO is enabled,
412 	 * or TX empty irq when FIFO is disabled
413 	 */
414 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
415 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
416 	else
417 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
418 }
419 
static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Enable the transmission-complete irq (used by the RS485 path). */
	stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
}
427 
/* DMA engine callback: a cyclic RX period completed; drain it to the tty. */
static void stm32_usart_rx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct tty_port *tport = &port->state->port;
	unsigned int size;
	unsigned long flags;

	/* Drain under the port lock, push to the tty outside of it. */
	spin_lock_irqsave(&port->lock, flags);
	size = stm32_usart_receive_chars(port, false);
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
	if (size)
		tty_flip_buffer_push(tport);
}
441 
442 static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
443 {
444 	struct stm32_port *stm32_port = to_stm32_port(port);
445 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
446 
447 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
448 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
449 	else
450 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
451 }
452 
static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Mask the transmission-complete irq. */
	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
}
460 
461 static void stm32_usart_rs485_rts_enable(struct uart_port *port)
462 {
463 	struct stm32_port *stm32_port = to_stm32_port(port);
464 	struct serial_rs485 *rs485conf = &port->rs485;
465 
466 	if (stm32_port->hw_flow_control ||
467 	    !(rs485conf->flags & SER_RS485_ENABLED))
468 		return;
469 
470 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
471 		mctrl_gpio_set(stm32_port->gpios,
472 			       stm32_port->port.mctrl | TIOCM_RTS);
473 	} else {
474 		mctrl_gpio_set(stm32_port->gpios,
475 			       stm32_port->port.mctrl & ~TIOCM_RTS);
476 	}
477 }
478 
479 static void stm32_usart_rs485_rts_disable(struct uart_port *port)
480 {
481 	struct stm32_port *stm32_port = to_stm32_port(port);
482 	struct serial_rs485 *rs485conf = &port->rs485;
483 
484 	if (stm32_port->hw_flow_control ||
485 	    !(rs485conf->flags & SER_RS485_ENABLED))
486 		return;
487 
488 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
489 		mctrl_gpio_set(stm32_port->gpios,
490 			       stm32_port->port.mctrl & ~TIOCM_RTS);
491 	} else {
492 		mctrl_gpio_set(stm32_port->gpios,
493 			       stm32_port->port.mctrl | TIOCM_RTS);
494 	}
495 }
496 
/*
 * Push as many bytes from the circular TX buffer as the TDR/FIFO accepts,
 * then arm or disarm the TX irq depending on whether data remains.
 */
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	/* PIO takes over: mask the TX DMA request line if it was enabled. */
	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}
521 
/*
 * Start a TX DMA transaction for the pending circular-buffer data, falling
 * back to PIO if the descriptor cannot be prepared or submitted.
 */
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count;

	if (stm32_usart_tx_dma_started(stm32port)) {
		/* A transaction is in flight: just re-enable its request line. */
		if (!stm32_usart_tx_dma_enabled(stm32port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	/* Linearize the circular buffer into the DMA bounce buffer. */
	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		/* Wrapped: copy tail..end first, then start..head. */
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	/*
	 * Set "tx_dma_busy" flag. This flag will be released when
	 * dmaengine_terminate_async will be called. This flag helps
	 * transmit_chars_dma not to start another DMA transaction
	 * if the callback of the previous is not yet called.
	 */
	stm32port->tx_dma_busy = true;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	if (dma_submit_error(dmaengine_submit(desc))) {
		/* dma no yet started, safe to free resources */
		stm32_usart_tx_dma_terminate(stm32port);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	/* Enable the TX DMA request line only once the transfer is queued. */
	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	/* Account the copied bytes immediately; DMA drains the bounce buffer. */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	return;

fallback_err:
	stm32_usart_transmit_chars_pio(port);
}
595 
/*
 * Core TX dispatch: handles the x_char, RS485 driver-enable sequencing,
 * and hands the pending data to DMA or PIO. Caller holds the port lock.
 */
static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	u32 isr;
	int ret;

	/* RS485 without HW flow control: assert DE before transmitting. */
	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED) {
		stm32_port->txdone = false;
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_enable(port);
	}

	/* A high-priority flow-control character preempts the normal data. */
	if (port->x_char) {
		/* Pause DMA so the x_char can be written through the TDR. */
		if (stm32_usart_tx_dma_started(stm32_port) &&
		    stm32_usart_tx_dma_enabled(stm32_port))
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

		/* Check that TDR is empty before filling FIFO */
		ret =
		readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						  isr,
						  (isr & USART_SR_TXE),
						  10, 1000);
		if (ret)
			dev_warn(port->dev, "1 character may be erased\n");

		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		/* Resume the paused DMA transaction, if any. */
		if (stm32_usart_tx_dma_started(stm32_port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	/* Clear the transmission-complete flag before sending new data. */
	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		stm32_usart_tx_interrupt_disable(port);
		/* RS485: wait for TC before de-asserting DE (irq handler). */
		if (!stm32_port->hw_flow_control &&
		    port->rs485.flags & SER_RS485_ENABLED) {
			stm32_port->txdone = true;
			stm32_usart_tc_interrupt_enable(port);
		}
	}
}
660 
/*
 * Hard irq handler: RS485 TC handling, wakeup/timeout flag clearing, RX
 * draining, and TX refill. Returns IRQ_WAKE_THREAD when RX DMA is active
 * so the threaded handler completes the (possibly longer) DMA drain.
 */
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;
	unsigned int size;

	sr = readl_relaxed(port->membase + ofs->isr);

	/* RS485: transmission finished, release the driver-enable signal. */
	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED &&
	    (sr & USART_SR_TC)) {
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_disable(port);
	}

	/* Acknowledge the receiver-timeout flag. */
	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	/*
	 * rx errors in dma mode has to be handled ASAP to avoid overrun as the DMA request
	 * line has been masked by HW and rx data are stacking in FIFO.
	 */
	if (!stm32_port->throttled) {
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) {
			spin_lock(&port->lock);
			size = stm32_usart_receive_chars(port, false);
			uart_unlock_and_check_sysrq(port);
			if (size)
				tty_flip_buffer_push(tport);
		}
	}

	/* TX-empty in PIO mode: refill the transmitter. */
	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	if (stm32_usart_rx_dma_enabled(port))
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}
718 
/* Threaded irq handler: drains the RX DMA buffer outside hard-irq context. */
static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int size;
	unsigned long flags;

	/* Receiver timeout irq for DMA RX */
	if (!stm32_port->throttled) {
		spin_lock_irqsave(&port->lock, flags);
		size = stm32_usart_receive_chars(port, false);
		uart_unlock_and_check_sysrq_irqrestore(port, flags);
		if (size)
			tty_flip_buffer_push(tport);
	}

	return IRQ_HANDLED;
}
738 
739 static unsigned int stm32_usart_tx_empty(struct uart_port *port)
740 {
741 	struct stm32_port *stm32_port = to_stm32_port(port);
742 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
743 
744 	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
745 		return TIOCSER_TEMT;
746 
747 	return 0;
748 }
749 
750 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
751 {
752 	struct stm32_port *stm32_port = to_stm32_port(port);
753 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
754 
755 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
756 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
757 	else
758 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
759 
760 	mctrl_gpio_set(stm32_port->gpios, mctrl);
761 }
762 
763 static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
764 {
765 	struct stm32_port *stm32_port = to_stm32_port(port);
766 	unsigned int ret;
767 
768 	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
769 	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
770 
771 	return mctrl_gpio_get(stm32_port->gpios, &ret);
772 }
773 
static void stm32_usart_enable_ms(struct uart_port *port)
{
	/* Enable modem-status change interrupts on the mctrl GPIO lines. */
	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}
778 
static void stm32_usart_disable_ms(struct uart_port *port)
{
	/* Disable modem-status change interrupts on the mctrl GPIO lines. */
	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}
783 
/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_tx_interrupt_disable(port);
	/* Also pause an in-flight TX DMA by masking its request line. */
	if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	/* De-assert RS485 driver-enable (no-op outside RS485 mode). */
	stm32_usart_rs485_rts_disable(port);
}
796 
797 /* There are probably characters waiting to be transmitted. */
798 static void stm32_usart_start_tx(struct uart_port *port)
799 {
800 	struct circ_buf *xmit = &port->state->xmit;
801 
802 	if (uart_circ_empty(xmit) && !port->x_char) {
803 		stm32_usart_rs485_rts_disable(port);
804 		return;
805 	}
806 
807 	stm32_usart_rs485_rts_enable(port);
808 
809 	stm32_usart_transmit_chars(port);
810 }
811 
/* Flush the transmit buffer. */
static void stm32_usart_flush_buffer(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Drop any TX DMA in flight and mask the TX DMA request line. */
	if (stm32_port->tx_ch) {
		stm32_usart_tx_dma_terminate(stm32_port);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	}
}
823 
/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
	 * Hardware flow control is triggered when RX FIFO is full.
	 */
	if (stm32_usart_rx_dma_enabled(port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	/* Mask the RX interrupts while throttled. */
	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	stm32_port->throttled = true;
	spin_unlock_irqrestore(&port->lock, flags);
}
847 
/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	/* Re-enable the RX interrupts that throttle() masked. */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	/*
	 * Switch back to DMA mode (re-enable DMA request line).
	 * Hardware flow control is stopped when FIFO is not full any more.
	 */
	if (stm32_port->rx_ch)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_port->throttled = false;
	spin_unlock_irqrestore(&port->lock, flags);
}
870 
/* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Disable DMA request line. */
	if (stm32_port->rx_ch)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	/* Mask the RX interrupts. */
	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}
885 
/* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
	/* Break generation is intentionally not implemented by this driver. */
}
890 
/*
 * Prepare, submit and start the cyclic RX DMA transaction, then (unless the
 * port is throttled) enable the RX DMA request line. Returns 0 or a
 * negative errno; on failure the port must fall back to interrupt mode.
 */
static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Reset the read position to the start of the cyclic buffer. */
	stm32_port->last_res = RX_BUF_L;
	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
					 stm32_port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "rx dma prep cyclic failed\n");
		return -ENODEV;
	}

	desc->callback = stm32_usart_rx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32_port->rx_ch);
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32_port->rx_ch);

	/*
	 * DMA request line not re-enabled at resume when port is throttled.
	 * It will be re-enabled by unthrottle ops.
	 */
	if (!stm32_port->throttled)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	return 0;
}
932 
/*
 * Open-time setup: request the (threaded) irq, apply RX/TX pin swap,
 * flush the RX FIFO, start RX DMA when available, then enable the
 * receiver, its interrupts and the UART itself.
 */
static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
				   stm32_usart_threaded_interrupt,
				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
				   name, port);
	if (ret)
		return ret;

	/* Swap the TX and RX pins when requested by the device tree. */
	if (stm32_port->swap) {
		val = readl_relaxed(port->membase + ofs->cr2);
		val |= USART_CR2_SWAP;
		writel_relaxed(val, port->membase + ofs->cr2);
	}

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	if (stm32_port->rx_ch) {
		ret = stm32_usart_start_rx_dma_cyclic(port);
		if (ret) {
			/* Undo the irq request before reporting the failure. */
			free_irq(port->irq, port);
			return ret;
		}
	}

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}
973 
/*
 * Close-time teardown: stop TX DMA, wait for the transmitter to drain,
 * stop RX DMA, flush both FIFOs, disable the UART and release the irq.
 */
static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	/* Mask the TX DMA request line and abort any in-flight transfer. */
	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_usart_tx_dma_started(stm32_port))
		stm32_usart_tx_dma_terminate(stm32_port);

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	/* Build the CR1 mask cleared below once TX has drained. */
	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	/* Wait (bounded) for the last frame to leave the shifter. */
	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	/* Send the TC error message only when ISR_TC is not set */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Disable RX DMA. */
	if (stm32_port->rx_ch)
		dmaengine_terminate_async(stm32_port->rx_ch);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}
1018 
/*
 * Apply the requested termios: the transmitter is drained, the USART is
 * stopped, CR1/CR2/CR3 and the baud rate register are rebuilt from scratch
 * (word length, parity, stop bits, FIFO thresholds, flow control, RS485,
 * DMA and wakeup configuration), then the UART is re-enabled.
 * Called with the port usable; takes port->lock for the reprogramming.
 */
static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	/* Ignore CRTSCTS when the DT did not declare hardware flow control */
	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	/* Max baud limited by the 8x oversampling mode (uartclk / 8) */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	/* Drain the transmitter (TC set) before stopping the port */
	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* Tx and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		/* Negative ftcfg means the threshold irq is unusable */
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	/* Mask used to strip undefined high bits when reading RDR */
	bits = tty_get_char_size(cflag);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9)
		cr1 |= USART_CR1_M0;
	else if ((bits == 7) && cfg->has_7bits_data)
		cr1 |= USART_CR1_M1;
	else if (bits != 8)
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
			, bits);

	/*
	 * Use the receiver timeout (RTOR) interrupt when data arrives via
	 * DMA or through a configured RX FIFO threshold.
	 */
	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/*
		 * Enable fifo threshold irq in two cases, either when there is no DMA, or when
		 * wake up over usart, from low power until the DMA gets re-enabled by resume.
		 */
		stm32_port->cr3_irq =  USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	/* Program the baud rate: integer mantissa plus fractional divider */
	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch) {
		/*
		 * Setup DMA to collect only valid data and enable error irqs.
		 * This also enables break reception when using DMA.
		 */
		cr1 |= USART_CR1_PEIE;
		cr3 |= USART_CR3_EIE;
		cr3 |= USART_CR3_DMAR;
		cr3 |= USART_CR3_DDRE;
	}

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);
		/* Driver-enable polarity follows the RTS-on-send request */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Configure wake up from low power on start bit detection */
	if (stm32_port->wakeup_src) {
		cr3 &= ~USART_CR3_WUS_MASK;
		cr3 |= USART_CR3_WUS_START_BIT;
	}

	/* Write CR3/CR2 before CR1: CR1 re-enables TX/RX */
	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	spin_unlock_irqrestore(&port->lock, flags);

	/* Handle modem control interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		stm32_usart_enable_ms(port);
	else
		stm32_usart_disable_ms(port);
}
1226 
1227 static const char *stm32_usart_type(struct uart_port *port)
1228 {
1229 	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1230 }
1231 
/* No resources claimed beyond devm-managed ones: nothing to release */
static void stm32_usart_release_port(struct uart_port *port)
{
}
1235 
/* Resources already acquired at probe time: always succeed */
static int stm32_usart_request_port(struct uart_port *port)
{
	return 0;
}
1240 
1241 static void stm32_usart_config_port(struct uart_port *port, int flags)
1242 {
1243 	if (flags & UART_CONFIG_TYPE)
1244 		port->type = PORT_STM32;
1245 }
1246 
/*
 * Reject any TIOCSSERIAL reconfiguration attempt: this driver exposes
 * no user changeable parameters.
 */
static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}
1253 
1254 static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1255 			   unsigned int oldstate)
1256 {
1257 	struct stm32_port *stm32port = container_of(port,
1258 			struct stm32_port, port);
1259 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1260 	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1261 	unsigned long flags;
1262 
1263 	switch (state) {
1264 	case UART_PM_STATE_ON:
1265 		pm_runtime_get_sync(port->dev);
1266 		break;
1267 	case UART_PM_STATE_OFF:
1268 		spin_lock_irqsave(&port->lock, flags);
1269 		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1270 		spin_unlock_irqrestore(&port->lock, flags);
1271 		pm_runtime_put_sync(port->dev);
1272 		break;
1273 	}
1274 }
1275 
1276 #if defined(CONFIG_CONSOLE_POLL)
1277 
1278  /* Callbacks for characters polling in debug context (i.e. KGDB). */
/* Ensure the USART kernel clock is running before polled I/O (e.g. KGDB) */
static int stm32_usart_poll_init(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	return clk_prepare_enable(stm32_port->clk);
}
1285 
/*
 * Non-blocking polled read: return the next received character masked to
 * the configured word length, or NO_POLL_CHAR when RX is empty.
 */
static int stm32_usart_poll_get_char(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* RXNE clear means no data pending */
	if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
		return NO_POLL_CHAR;

	return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
}
1296 
/* Polled write: reuse the console putchar (busy-waits for TXE) */
static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
{
	stm32_usart_console_putchar(port, ch);
}
1301 #endif /* CONFIG_CONSOLE_POLL */
1302 
/* Serial core operations implemented by this driver */
static const struct uart_ops stm32_uart_ops = {
	.tx_empty	= stm32_usart_tx_empty,
	.set_mctrl	= stm32_usart_set_mctrl,
	.get_mctrl	= stm32_usart_get_mctrl,
	.stop_tx	= stm32_usart_stop_tx,
	.start_tx	= stm32_usart_start_tx,
	.throttle	= stm32_usart_throttle,
	.unthrottle	= stm32_usart_unthrottle,
	.stop_rx	= stm32_usart_stop_rx,
	.enable_ms	= stm32_usart_enable_ms,
	.break_ctl	= stm32_usart_break_ctl,
	.startup	= stm32_usart_startup,
	.shutdown	= stm32_usart_shutdown,
	.flush_buffer	= stm32_usart_flush_buffer,
	.set_termios	= stm32_usart_set_termios,
	.pm		= stm32_usart_pm,
	.type		= stm32_usart_type,
	.release_port	= stm32_usart_release_port,
	.request_port	= stm32_usart_request_port,
	.config_port	= stm32_usart_config_port,
	.verify_port	= stm32_usart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_init      = stm32_usart_poll_init,
	.poll_get_char	= stm32_usart_poll_get_char,
	.poll_put_char	= stm32_usart_poll_put_char,
#endif /* CONFIG_CONSOLE_POLL */
};
1330 
1331 /*
1332  * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
1333  * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
1334  * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
1335  * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
1336  */
1337 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
1338 
1339 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
1340 				  int *ftcfg)
1341 {
1342 	u32 bytes, i;
1343 
1344 	/* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
1345 	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1346 		bytes = 8;
1347 
1348 	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
1349 		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
1350 			break;
1351 	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1352 		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1353 
1354 	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
1355 		stm32h7_usart_fifo_thresh_cfg[i]);
1356 
1357 	/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
1358 	if (i)
1359 		*ftcfg = i - 1;
1360 	else
1361 		*ftcfg = -EINVAL;
1362 }
1363 
/* Undo stm32_usart_init_port(): drop the clock enabled there */
static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}
1368 
/*
 * One-time port setup from platform/DT data: IRQ, uart_port fields, RS485,
 * wakeup/swap/FIFO options, register mapping and kernel clock. On success
 * the clock is left enabled (balanced by stm32_usart_deinit_port()).
 * Returns 0 or a negative errno.
 */
static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	port->iotype	= UPIO_MEM;
	port->flags	= UPF_BOOT_AUTOCONF;
	port->ops	= &stm32_uart_ops;
	port->dev	= &pdev->dev;
	port->fifosize	= stm32port->info->cfg.fifosize;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	/* Wakeup only makes sense on variants that support it */
	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, "rx-threshold",
				      &stm32port->rxftcfg);
		stm32_usart_get_ftcfg(pdev, "tx-threshold",
				      &stm32port->txftcfg);
	}

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
	 * properties should not be specified.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}
1455 
1456 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1457 {
1458 	struct device_node *np = pdev->dev.of_node;
1459 	int id;
1460 
1461 	if (!np)
1462 		return NULL;
1463 
1464 	id = of_alias_get_id(np, "serial");
1465 	if (id < 0) {
1466 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1467 		return NULL;
1468 	}
1469 
1470 	if (WARN_ON(id >= STM32_MAX_PORTS))
1471 		return NULL;
1472 
1473 	stm32_ports[id].hw_flow_control =
1474 		of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ ||
1475 		of_property_read_bool (np, "uart-has-rtscts");
1476 	stm32_ports[id].port.line = id;
1477 	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1478 	stm32_ports[id].cr3_irq = 0;
1479 	stm32_ports[id].last_res = RX_BUF_L;
1480 	return &stm32_ports[id];
1481 }
1482 
1483 #ifdef CONFIG_OF
1484 static const struct of_device_id stm32_match[] = {
1485 	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
1486 	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
1487 	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
1488 	{},
1489 };
1490 
1491 MODULE_DEVICE_TABLE(of, stm32_match);
1492 #endif
1493 
1494 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
1495 					 struct platform_device *pdev)
1496 {
1497 	if (stm32port->rx_buf)
1498 		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
1499 				  stm32port->rx_dma_buf);
1500 }
1501 
/*
 * Allocate the coherent RX DMA buffer and configure the RX channel to read
 * bytes from the RDR register. Returns 0 or a negative errno; on failure
 * the buffer is freed again.
 */
static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	/*
	 * Using DMA and threaded handler for the console could lead to
	 * deadlocks.
	 */
	if (uart_console(port))
		return -ENODEV;

	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}
1538 
1539 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
1540 					 struct platform_device *pdev)
1541 {
1542 	if (stm32port->tx_buf)
1543 		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
1544 				  stm32port->tx_dma_buf);
1545 }
1546 
/*
 * Allocate the coherent TX DMA buffer and configure the TX channel to write
 * bytes into the TDR register. Returns 0 or a negative errno; on failure
 * the buffer is freed again.
 */
static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}
1576 
/*
 * Platform probe: resolve the port slot and match data, initialize the
 * port, set up wakeup, try to acquire RX/TX DMA channels (falling back to
 * interrupt mode on any non-deferral failure), enable runtime PM and
 * register the port with the serial core. The error ladder unwinds each
 * step in strict reverse order.
 */
static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		return ret;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	/* Only -EPROBE_DEFER is propagated; other DMA errors are tolerated */
	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_wakeirq;
	}
	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	/* Mark active before enabling runtime PM: the clk is already on */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		dma_release_channel(stm32port->tx_ch);
	}

	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

err_wakeirq:
	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

	return ret;
}
1679 
1680 static int stm32_usart_serial_remove(struct platform_device *pdev)
1681 {
1682 	struct uart_port *port = platform_get_drvdata(pdev);
1683 	struct stm32_port *stm32_port = to_stm32_port(port);
1684 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1685 	int err;
1686 	u32 cr3;
1687 
1688 	pm_runtime_get_sync(&pdev->dev);
1689 	err = uart_remove_one_port(&stm32_usart_driver, port);
1690 	if (err)
1691 		return(err);
1692 
1693 	pm_runtime_disable(&pdev->dev);
1694 	pm_runtime_set_suspended(&pdev->dev);
1695 	pm_runtime_put_noidle(&pdev->dev);
1696 
1697 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
1698 	cr3 = readl_relaxed(port->membase + ofs->cr3);
1699 	cr3 &= ~USART_CR3_EIE;
1700 	cr3 &= ~USART_CR3_DMAR;
1701 	cr3 &= ~USART_CR3_DDRE;
1702 	writel_relaxed(cr3, port->membase + ofs->cr3);
1703 
1704 	if (stm32_port->tx_ch) {
1705 		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
1706 		dma_release_channel(stm32_port->tx_ch);
1707 	}
1708 
1709 	if (stm32_port->rx_ch) {
1710 		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
1711 		dma_release_channel(stm32_port->rx_ch);
1712 	}
1713 
1714 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1715 
1716 	if (stm32_port->wakeup_src) {
1717 		dev_pm_clear_wake_irq(&pdev->dev);
1718 		device_init_wakeup(&pdev->dev, false);
1719 	}
1720 
1721 	stm32_usart_deinit_port(stm32_port);
1722 
1723 	return 0;
1724 }
1725 
/*
 * Write one character for console/poll output: spin until the transmit
 * data register is empty (TXE), then write the byte. Gives up with an
 * error message after STM32_USART_TIMEOUT_USEC.
 */
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 isr;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
						(isr & USART_SR_TXE), 100,
						STM32_USART_TIMEOUT_USEC);
	if (ret != 0) {
		dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
		return;
	}
	writel_relaxed(ch, port->membase + ofs->tdr);
}
1742 
1743 #ifdef CONFIG_SERIAL_STM32_CONSOLE
/*
 * Console write callback: mask all USART interrupts, force the transmitter
 * and UART on, emit the string with polled putchar, then restore CR1.
 * During an oops the lock is only trylock'd so a console write from the
 * crashing context cannot deadlock on its own port lock.
 */
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |=  USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}
1774 
/*
 * Console setup callback: validate the console index, make sure the port
 * was already probed and mapped, then apply the (optionally user-supplied)
 * line options. Defaults to 9600 8N1 with no flow control.
 */
static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}
1802 
/* Console descriptor registered via the uart driver's .cons pointer */
static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* -1: pick the port from the command line */
	.data		= &stm32_usart_driver,
};
1812 
1813 #define STM32_SERIAL_CONSOLE (&stm32_console)
1814 
1815 #else
1816 #define STM32_SERIAL_CONSOLE NULL
1817 #endif /* CONFIG_SERIAL_STM32_CONSOLE */
1818 
1819 #ifdef CONFIG_SERIAL_EARLYCON
1820 static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1821 {
1822 	struct stm32_usart_info *info = port->private_data;
1823 
1824 	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
1825 		cpu_relax();
1826 
1827 	writel_relaxed(ch, port->membase + info->ofs.tdr);
1828 }
1829 
1830 static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
1831 {
1832 	struct earlycon_device *device = console->data;
1833 	struct uart_port *port = &device->port;
1834 
1835 	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
1836 }
1837 
1838 static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
1839 {
1840 	if (!(device->port.membase || device->port.iobase))
1841 		return -ENODEV;
1842 	device->port.private_data = &stm32h7_info;
1843 	device->con->write = early_stm32_serial_write;
1844 	return 0;
1845 }
1846 
1847 static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
1848 {
1849 	if (!(device->port.membase || device->port.iobase))
1850 		return -ENODEV;
1851 	device->port.private_data = &stm32f7_info;
1852 	device->con->write = early_stm32_serial_write;
1853 	return 0;
1854 }
1855 
1856 static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
1857 {
1858 	if (!(device->port.membase || device->port.iobase))
1859 		return -ENODEV;
1860 	device->port.private_data = &stm32f4_info;
1861 	device->con->write = early_stm32_serial_write;
1862 	return 0;
1863 }
1864 
/* Register "earlycon" handlers, matched against the DT stdout-path node */
OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
1868 #endif /* CONFIG_SERIAL_EARLYCON */
1869 
/* Serial core driver descriptor; major/minor 0 => dynamically allocated */
static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};
1878 
/*
 * Enable (enable=true) or disable low-power wakeup on this port around
 * system suspend/resume. When enabling, pending RX data is drained from
 * the DMA buffer and the FIFO, and RX DMA is stopped (restarted again on
 * the disable path). No-op when the port has no wakeup source or is not
 * open. Returns 0 or a negative errno.
 */
static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
						       bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Avoid race with RX IRQ when DMAR is cleared */
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Poll data from DMA RX buffer if any */
			size = stm32_usart_receive_chars(port, true);
			dmaengine_terminate_async(stm32_port->rx_ch);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		/* Resume path: restart cyclic RX DMA before clearing wakeup */
		if (stm32_port->rx_ch) {
			ret = stm32_usart_start_rx_dma_cyclic(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}
1933 
/*
 * System suspend: suspend the port via the serial core, arm low-power
 * wakeup when this device may wake the system, then switch pinctrl to the
 * idle or sleep state (unless the console must stay alive).
 * NOTE(review): if stm32_usart_serial_en_wakeup() fails here the port is
 * left suspended without a matching resume — pre-existing behavior.
 */
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}
1962 
1963 static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
1964 {
1965 	struct uart_port *port = dev_get_drvdata(dev);
1966 	int ret;
1967 
1968 	pinctrl_pm_select_default_state(dev);
1969 
1970 	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
1971 		ret = stm32_usart_serial_en_wakeup(port, false);
1972 		if (ret)
1973 			return ret;
1974 	}
1975 
1976 	return uart_resume_port(&stm32_usart_driver, port);
1977 }
1978 
1979 static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
1980 {
1981 	struct uart_port *port = dev_get_drvdata(dev);
1982 	struct stm32_port *stm32port = container_of(port,
1983 			struct stm32_port, port);
1984 
1985 	clk_disable_unprepare(stm32port->clk);
1986 
1987 	return 0;
1988 }
1989 
1990 static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
1991 {
1992 	struct uart_port *port = dev_get_drvdata(dev);
1993 	struct stm32_port *stm32port = container_of(port,
1994 			struct stm32_port, port);
1995 
1996 	return clk_prepare_enable(stm32port->clk);
1997 }
1998 
/* Runtime PM gates the USART clock; system sleep goes through uart_suspend_port() */
static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};
2005 
/* Platform driver matched against the DT compatibles in stm32_match */
static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};
2015 
2016 static int __init stm32_usart_init(void)
2017 {
2018 	static char banner[] __initdata = "STM32 USART driver initialized";
2019 	int ret;
2020 
2021 	pr_info("%s\n", banner);
2022 
2023 	ret = uart_register_driver(&stm32_usart_driver);
2024 	if (ret)
2025 		return ret;
2026 
2027 	ret = platform_driver_register(&stm32_serial_driver);
2028 	if (ret)
2029 		uart_unregister_driver(&stm32_usart_driver);
2030 
2031 	return ret;
2032 }
2033 
static void __exit stm32_usart_exit(void)
{
	/* Tear down in reverse order of stm32_usart_init() */
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}
2039 
/* Module entry/exit points and metadata */
module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");
2046