xref: /openbmc/linux/drivers/tty/serial/stm32-usart.c (revision c494a447)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Maxime Coquelin 2015
4  * Copyright (C) STMicroelectronics SA 2017
5  * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
6  *	     Gerald Baeza <gerald.baeza@foss.st.com>
7  *	     Erwan Le Ray <erwan.leray@foss.st.com>
8  *
9  * Inspired by st-asc.c from STMicroelectronics (c)
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/console.h>
14 #include <linux/delay.h>
15 #include <linux/dma-direction.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_platform.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/serial_core.h>
29 #include <linux/serial.h>
30 #include <linux/spinlock.h>
31 #include <linux/sysrq.h>
32 #include <linux/tty_flip.h>
33 #include <linux/tty.h>
34 
35 #include "serial_mctrl_gpio.h"
36 #include "stm32-usart.h"
37 
38 static void stm32_usart_stop_tx(struct uart_port *port);
39 static void stm32_usart_transmit_chars(struct uart_port *port);
40 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
41 
42 static inline struct stm32_port *to_stm32_port(struct uart_port *port)
43 {
44 	return container_of(port, struct stm32_port, port);
45 }
46 
47 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
48 {
49 	u32 val;
50 
51 	val = readl_relaxed(port->membase + reg);
52 	val |= bits;
53 	writel_relaxed(val, port->membase + reg);
54 }
55 
56 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
57 {
58 	u32 val;
59 
60 	val = readl_relaxed(port->membase + reg);
61 	val &= ~bits;
62 	writel_relaxed(val, port->membase + reg);
63 }
64 
65 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
66 					 u32 delay_DDE, u32 baud)
67 {
68 	u32 rs485_deat_dedt;
69 	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
70 	bool over8;
71 
72 	*cr3 |= USART_CR3_DEM;
73 	over8 = *cr1 & USART_CR1_OVER8;
74 
75 	if (over8)
76 		rs485_deat_dedt = delay_ADE * baud * 8;
77 	else
78 		rs485_deat_dedt = delay_ADE * baud * 16;
79 
80 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
81 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
82 			  rs485_deat_dedt_max : rs485_deat_dedt;
83 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
84 			   USART_CR1_DEAT_MASK;
85 	*cr1 |= rs485_deat_dedt;
86 
87 	if (over8)
88 		rs485_deat_dedt = delay_DDE * baud * 8;
89 	else
90 		rs485_deat_dedt = delay_DDE * baud * 16;
91 
92 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
93 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
94 			  rs485_deat_dedt_max : rs485_deat_dedt;
95 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
96 			   USART_CR1_DEDT_MASK;
97 	*cr1 |= rs485_deat_dedt;
98 }
99 
/*
 * Apply an RS485 configuration to the USART.
 * The UART is disabled while CR1/CR3 are reprogrammed and re-enabled on
 * exit. Always returns 0.
 */
static int stm32_usart_config_rs485(struct uart_port *port,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	/* Disable the UART before touching CR1/CR3 */
	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	/* The receiver stays active while the driver-enable line is asserted */
	rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		/* Recover the current baud rate from the BRR divider */
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		/* In OVER8 mode, BRR[3:0] is right-shifted by one in the divider */
		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		/* Program driver-enable assertion/de-assertion timings */
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		/* DEP selects the polarity of the driver-enable signal */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
			cr3 &= ~USART_CR3_DEP;
		else
			cr3 |= USART_CR3_DEP;

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		/* RS485 disabled: clear driver-enable mode, polarity and timings */
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Re-enable the UART */
	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	return 0;
}
148 
149 static int stm32_usart_init_rs485(struct uart_port *port,
150 				  struct platform_device *pdev)
151 {
152 	struct serial_rs485 *rs485conf = &port->rs485;
153 
154 	rs485conf->flags = 0;
155 	rs485conf->delay_rts_before_send = 0;
156 	rs485conf->delay_rts_after_send = 0;
157 
158 	if (!pdev->dev.of_node)
159 		return -ENODEV;
160 
161 	return uart_get_rs485_mode(port);
162 }
163 
164 static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
165 {
166 	struct stm32_port *stm32_port = to_stm32_port(port);
167 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
168 
169 	if (!stm32_port->rx_ch)
170 		return false;
171 
172 	return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
173 }
174 
175 /* Return true when data is pending (in pio mode), and false when no data is pending. */
176 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
177 {
178 	struct stm32_port *stm32_port = to_stm32_port(port);
179 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
180 
181 	*sr = readl_relaxed(port->membase + ofs->isr);
182 	/* Get pending characters in RDR or FIFO */
183 	if (*sr & USART_SR_RXNE) {
184 		/* Get all pending characters from the RDR or the FIFO when using interrupts */
185 		if (!stm32_usart_rx_dma_enabled(port))
186 			return true;
187 
188 		/* Handle only RX data errors when using DMA */
189 		if (*sr & USART_SR_ERR_MASK)
190 			return true;
191 	}
192 
193 	return false;
194 }
195 
196 static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
197 {
198 	struct stm32_port *stm32_port = to_stm32_port(port);
199 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
200 	unsigned long c;
201 
202 	c = readl_relaxed(port->membase + ofs->rdr);
203 	/* Apply RDR data mask */
204 	c &= stm32_port->rdr_mask;
205 
206 	return c;
207 }
208 
/*
 * Drain pending RX characters by PIO, pushing each into the tty layer with
 * the appropriate TTY_* flag. Updates icount statistics for overrun,
 * parity, framing and break conditions. Returns the number of characters
 * consumed from the RDR/FIFO (including ones dropped for sysrq/break).
 */
static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	unsigned int size = 0;
	u32 sr;
	char flag;

	while (stm32_usart_pending_rx_pio(port, &sr)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits has to be cleared before reading the RDR:
		 * In FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignment between RDR and SR,
		 * and clear status bits of the next rx data.
		 *
		 * Clear errors flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char_pio(port);
		port->icount.rx++;
		size++;
		if (sr & USART_SR_ERR_MASK) {
			/* Account the error; overrun takes precedence */
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			/* Only report conditions the tty layer asked for */
			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		/* Swallow the character if it completes a sysrq sequence */
		if (uart_prepare_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	return size;
}
275 
276 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
277 {
278 	struct stm32_port *stm32_port = to_stm32_port(port);
279 	struct tty_port *ttyport = &stm32_port->port.state->port;
280 	unsigned char *dma_start;
281 	int dma_count, i;
282 
283 	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
284 
285 	/*
286 	 * Apply rdr_mask on buffer in order to mask parity bit.
287 	 * This loop is useless in cs8 mode because DMA copies only
288 	 * 8 bits and already ignores parity bit.
289 	 */
290 	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
291 		for (i = 0; i < dma_size; i++)
292 			*(dma_start + i) &= stm32_port->rdr_mask;
293 
294 	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
295 	port->icount.rx += dma_count;
296 	if (dma_count != dma_size)
297 		port->icount.buf_overrun++;
298 	stm32_port->last_res -= dma_count;
299 	if (stm32_port->last_res == 0)
300 		stm32_port->last_res = RX_BUF_L;
301 }
302 
/*
 * Flush everything the RX DMA has written since the last flush.
 * The amount of new data is derived from the DMA residue vs. last_res;
 * when the cyclic buffer has wrapped since the last flush (residue >
 * last_res), the tail part up to the end of the buffer is pushed first.
 * Returns the total number of bytes handed to the tty layer.
 */
static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int dma_size, size = 0;

	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
		/* Conditional first part: from last_res to end of DMA buffer */
		dma_size = stm32_port->last_res;
		stm32_usart_push_buffer_dma(port, dma_size);
		size = dma_size;
	}

	/* Second (or only) part: from the current position up to the residue */
	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
	stm32_usart_push_buffer_dma(port, dma_size);
	size += dma_size;

	return size;
}
322 
/*
 * Top-level RX path: drain received data via DMA when active, falling back
 * to PIO for error handling or when the DMA transaction has failed.
 * @force_dma_flush: flush the DMA buffer even if DMAR is currently masked
 * (used when the request line was disabled but data remains buffered).
 * Returns the number of characters pushed to the tty layer.
 * Caller must hold port->lock.
 */
static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status rx_dma_status;
	u32 sr;
	unsigned int size = 0;

	if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    &stm32_port->rx_dma_state);
		if (rx_dma_status == DMA_IN_PROGRESS) {
			/* Empty DMA buffer */
			size = stm32_usart_receive_chars_dma(port);
			sr = readl_relaxed(port->membase + ofs->isr);
			if (sr & USART_SR_ERR_MASK) {
				/* Disable DMA request line */
				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

				/* Switch to PIO mode to handle the errors */
				size += stm32_usart_receive_chars_pio(port);

				/* Switch back to DMA mode */
				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
			}
		} else {
			/* Disable RX DMA */
			dmaengine_terminate_async(stm32_port->rx_ch);
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Fall back to interrupt mode */
			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
			size = stm32_usart_receive_chars_pio(port);
		}
	} else {
		size = stm32_usart_receive_chars_pio(port);
	}

	return size;
}
363 
364 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
365 {
366 	dmaengine_terminate_async(stm32_port->tx_ch);
367 	stm32_port->tx_dma_busy = false;
368 }
369 
370 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
371 {
372 	/*
373 	 * We cannot use the function "dmaengine_tx_status" to know the
374 	 * status of DMA. This function does not show if the "dma complete"
375 	 * callback of the DMA transaction has been called. So we prefer
376 	 * to use "tx_dma_busy" flag to prevent dual DMA transaction at the
377 	 * same time.
378 	 */
379 	return stm32_port->tx_dma_busy;
380 }
381 
382 static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
383 {
384 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
385 
386 	return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT);
387 }
388 
/*
 * TX DMA completion callback (dmaengine context): disable the DMAT request
 * line, release the channel, then kick the TX path again under port->lock
 * in case more data is queued in the circular buffer.
 */
static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned long flags;

	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32_usart_tx_dma_terminate(stm32port);

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}
404 
/* Unmask the TX interrupt appropriate for the current FIFO configuration. */
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enables TX FIFO threshold irq when FIFO is enabled,
	 * or TX empty irq when FIFO is disabled
	 */
	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
419 
420 static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
421 {
422 	struct stm32_port *stm32_port = to_stm32_port(port);
423 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
424 
425 	stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
426 }
427 
428 static void stm32_usart_rx_dma_complete(void *arg)
429 {
430 	struct uart_port *port = arg;
431 	struct tty_port *tport = &port->state->port;
432 	unsigned int size;
433 	unsigned long flags;
434 
435 	spin_lock_irqsave(&port->lock, flags);
436 	size = stm32_usart_receive_chars(port, false);
437 	uart_unlock_and_check_sysrq_irqrestore(port, flags);
438 	if (size)
439 		tty_flip_buffer_push(tport);
440 }
441 
442 static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
443 {
444 	struct stm32_port *stm32_port = to_stm32_port(port);
445 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
446 
447 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
448 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
449 	else
450 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
451 }
452 
453 static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
454 {
455 	struct stm32_port *stm32_port = to_stm32_port(port);
456 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
457 
458 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
459 }
460 
461 static void stm32_usart_rs485_rts_enable(struct uart_port *port)
462 {
463 	struct stm32_port *stm32_port = to_stm32_port(port);
464 	struct serial_rs485 *rs485conf = &port->rs485;
465 
466 	if (stm32_port->hw_flow_control ||
467 	    !(rs485conf->flags & SER_RS485_ENABLED))
468 		return;
469 
470 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
471 		mctrl_gpio_set(stm32_port->gpios,
472 			       stm32_port->port.mctrl | TIOCM_RTS);
473 	} else {
474 		mctrl_gpio_set(stm32_port->gpios,
475 			       stm32_port->port.mctrl & ~TIOCM_RTS);
476 	}
477 }
478 
479 static void stm32_usart_rs485_rts_disable(struct uart_port *port)
480 {
481 	struct stm32_port *stm32_port = to_stm32_port(port);
482 	struct serial_rs485 *rs485conf = &port->rs485;
483 
484 	if (stm32_port->hw_flow_control ||
485 	    !(rs485conf->flags & SER_RS485_ENABLED))
486 		return;
487 
488 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
489 		mctrl_gpio_set(stm32_port->gpios,
490 			       stm32_port->port.mctrl & ~TIOCM_RTS);
491 	} else {
492 		mctrl_gpio_set(stm32_port->gpios,
493 			       stm32_port->port.mctrl | TIOCM_RTS);
494 	}
495 }
496 
/*
 * Push as many characters as the TDR/FIFO will accept by PIO.
 * If TX DMA is currently enabled, the DMAT request line is masked first so
 * PIO and DMA do not both feed the TDR. TX interrupts are left unmasked
 * when data remains, so the remainder goes out on the next TXE irq.
 * Caller must hold port->lock.
 */
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}
521 
/*
 * Start a TX DMA transaction for up to TX_BUF_L pending characters.
 * The circular buffer content is copied (possibly in two chunks when it
 * wraps) into the DMA bounce buffer, a single slave transfer is submitted,
 * and the DMAT request line is enabled. On any DMA setup failure the data
 * is sent by PIO instead. Caller must hold port->lock.
 */
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count;

	/* A transaction is already in flight: just make sure DMAT is on */
	if (stm32_usart_tx_dma_started(stm32port)) {
		if (!stm32_usart_tx_dma_enabled(stm32port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	/* Copy the circular buffer into the bounce buffer, handling wrap */
	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	/*
	 * Set "tx_dma_busy" flag. This flag will be released when
	 * dmaengine_terminate_async will be called. This flag helps
	 * transmit_chars_dma not to start another DMA transaction
	 * if the callback of the previous is not yet called.
	 */
	stm32port->tx_dma_busy = true;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	if (dma_submit_error(dmaengine_submit(desc))) {
		/* DMA not yet started, safe to free resources */
		stm32_usart_tx_dma_terminate(stm32port);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	/* Consume the copied characters from the circular buffer */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	return;

fallback_err:
	stm32_usart_transmit_chars_pio(port);
}
595 
/*
 * Main TX dispatch: handle RS485 driver-enable, the high-priority x_char,
 * then hand the circular buffer to the DMA or PIO path. Wakes up writers
 * when the buffer drains below WAKEUP_CHARS, and arms the TC interrupt to
 * drop RS485 RTS once the last character has fully left the shifter.
 * Caller must hold port->lock.
 */
static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	u32 isr;
	int ret;

	/* RS485 (gpio-driven): assert RTS before transmission starts */
	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED) {
		stm32_port->txdone = false;
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_enable(port);
	}

	/* x_char (XON/XOFF) bypasses the circular buffer and any DMA */
	if (port->x_char) {
		if (stm32_usart_tx_dma_started(stm32_port) &&
		    stm32_usart_tx_dma_enabled(stm32_port))
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

		/* Check that TDR is empty before filling FIFO */
		ret =
		readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						  isr,
						  (isr & USART_SR_TXE),
						  10, 1000);
		if (ret)
			dev_warn(port->dev, "1 character may be erased\n");

		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		/* Restore the DMA request line if a transaction was paused */
		if (stm32_usart_tx_dma_started(stm32_port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	/* Clear the stale transmission-complete flag before sending */
	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		stm32_usart_tx_interrupt_disable(port);
		/* RS485: wait for TC before de-asserting RTS */
		if (!stm32_port->hw_flow_control &&
		    port->rs485.flags & SER_RS485_ENABLED) {
			stm32_port->txdone = true;
			stm32_usart_tc_interrupt_enable(port);
		}
	}
}
660 
/*
 * Hard irq handler: services TC (RS485 RTS release), receiver timeout,
 * wakeup, RX (PIO, or DMA error recovery) and TX-empty events.
 * Returns IRQ_WAKE_THREAD when RX DMA is active so the threaded handler
 * can drain the DMA buffer, IRQ_HANDLED otherwise.
 */
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;
	unsigned int size;

	sr = readl_relaxed(port->membase + ofs->isr);

	/* RS485 (gpio-driven): last frame fully sent, drop RTS */
	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED &&
	    (sr & USART_SR_TC)) {
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_disable(port);
	}

	/* Acknowledge receiver timeout (used to flush RX DMA) */
	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	/*
	 * rx errors in dma mode have to be handled ASAP to avoid overrun as the DMA request
	 * line has been masked by HW and rx data are stacking in FIFO.
	 */
	if (!stm32_port->throttled) {
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) {
			spin_lock(&port->lock);
			size = stm32_usart_receive_chars(port, false);
			uart_unlock_and_check_sysrq(port);
			if (size)
				tty_flip_buffer_push(tport);
		}
	}

	/* PIO TX path only; DMA TX completes via its own callback */
	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	if (stm32_usart_rx_dma_enabled(port))
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}
718 
719 static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
720 {
721 	struct uart_port *port = ptr;
722 	struct tty_port *tport = &port->state->port;
723 	struct stm32_port *stm32_port = to_stm32_port(port);
724 	unsigned int size;
725 	unsigned long flags;
726 
727 	/* Receiver timeout irq for DMA RX */
728 	if (!stm32_port->throttled) {
729 		spin_lock_irqsave(&port->lock, flags);
730 		size = stm32_usart_receive_chars(port, false);
731 		uart_unlock_and_check_sysrq_irqrestore(port, flags);
732 		if (size)
733 			tty_flip_buffer_push(tport);
734 	}
735 
736 	return IRQ_HANDLED;
737 }
738 
739 static unsigned int stm32_usart_tx_empty(struct uart_port *port)
740 {
741 	struct stm32_port *stm32_port = to_stm32_port(port);
742 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
743 
744 	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
745 		return TIOCSER_TEMT;
746 
747 	return 0;
748 }
749 
750 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
751 {
752 	struct stm32_port *stm32_port = to_stm32_port(port);
753 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
754 
755 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
756 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
757 	else
758 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
759 
760 	mctrl_gpio_set(stm32_port->gpios, mctrl);
761 }
762 
763 static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
764 {
765 	struct stm32_port *stm32_port = to_stm32_port(port);
766 	unsigned int ret;
767 
768 	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
769 	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
770 
771 	return mctrl_gpio_get(stm32_port->gpios, &ret);
772 }
773 
774 static void stm32_usart_enable_ms(struct uart_port *port)
775 {
776 	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
777 }
778 
779 static void stm32_usart_disable_ms(struct uart_port *port)
780 {
781 	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
782 }
783 
784 /* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Mask TX irqs first, then pause an in-flight DMA transaction */
	stm32_usart_tx_interrupt_disable(port);
	if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	/* Drop RS485 driver-enable now that nothing is being sent */
	stm32_usart_rs485_rts_disable(port);
}
796 
797 /* There are probably characters waiting to be transmitted. */
798 static void stm32_usart_start_tx(struct uart_port *port)
799 {
800 	struct circ_buf *xmit = &port->state->xmit;
801 
802 	if (uart_circ_empty(xmit) && !port->x_char) {
803 		stm32_usart_rs485_rts_disable(port);
804 		return;
805 	}
806 
807 	stm32_usart_rs485_rts_enable(port);
808 
809 	stm32_usart_transmit_chars(port);
810 }
811 
812 /* Flush the transmit buffer. */
813 static void stm32_usart_flush_buffer(struct uart_port *port)
814 {
815 	struct stm32_port *stm32_port = to_stm32_port(port);
816 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
817 
818 	if (stm32_port->tx_ch) {
819 		stm32_usart_tx_dma_terminate(stm32_port);
820 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
821 	}
822 }
823 
824 /* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
	 * Hardware flow control is triggered when RX FIFO is full.
	 */
	if (stm32_usart_rx_dma_enabled(port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	/* Mask the RX interrupts so no more data is pushed to the tty */
	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	/* Checked by the irq handlers and by resume/unthrottle */
	stm32_port->throttled = true;
	spin_unlock_irqrestore(&port->lock, flags);
}
847 
848 /* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* Re-enable the RX interrupts masked by throttle */
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	/*
	 * Switch back to DMA mode (re-enable DMA request line).
	 * Hardware flow control is stopped when FIFO is not full any more.
	 */
	if (stm32_port->rx_ch)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_port->throttled = false;
	spin_unlock_irqrestore(&port->lock, flags);
}
870 
871 /* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Disable DMA request line. */
	if (stm32_port->rx_ch)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	/* Then mask the RX interrupts (RXNEIE or FIFO threshold) */
	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}
885 
886 /* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
	/* Break generation is intentionally not implemented by this driver */
}
890 
/*
 * Set up and start the cyclic RX DMA transaction over the RX bounce buffer
 * (RX_BUF_L bytes, period RX_BUF_P) and, unless the port is throttled,
 * enable the DMAR request line. Returns 0 on success or a negative errno
 * when the descriptor cannot be prepared or submitted.
 */
static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Read position tracking starts at a full (empty) buffer */
	stm32_port->last_res = RX_BUF_L;
	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
					 stm32_port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "rx dma prep cyclic failed\n");
		return -ENODEV;
	}

	desc->callback = stm32_usart_rx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32_port->rx_ch);
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32_port->rx_ch);

	/*
	 * DMA request line not re-enabled at resume when port is throttled.
	 * It will be re-enabled by unthrottle ops.
	 */
	if (!stm32_port->throttled)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	return 0;
}
932 
/*
 * Port open path: request the (threaded) irq, apply TX/RX pin swap,
 * flush the RX FIFO, start cyclic RX DMA when a channel is available,
 * and finally enable the receiver and its interrupts along with the UART.
 * Returns 0 on success or a negative errno.
 */
static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
				   stm32_usart_threaded_interrupt,
				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
				   name, port);
	if (ret)
		return ret;

	/* Swap TX/RX pins when requested by the device tree */
	if (stm32_port->swap) {
		val = readl_relaxed(port->membase + ofs->cr2);
		val |= USART_CR2_SWAP;
		writel_relaxed(val, port->membase + ofs->cr2);
	}

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	if (stm32_port->rx_ch) {
		ret = stm32_usart_start_rx_dma_cyclic(port);
		if (ret) {
			/* Undo the irq request so open can be retried cleanly */
			free_irq(port->irq, port);
			return ret;
		}
	}

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}
973 
/*
 * Port close path: stop TX DMA, disable modem-status irqs, wait (bounded)
 * for the last transmission to complete, stop RX DMA, flush both FIFOs,
 * then clear the enable/irq bits and release the irq.
 */
static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	/* Stop feeding the TDR by DMA and tear the transaction down */
	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_usart_tx_dma_started(stm32_port))
		stm32_usart_tx_dma_terminate(stm32_port);

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	/* Build the set of CR1 bits to clear at the end */
	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	/* Give in-flight data up to 100ms to drain before disabling */
	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	/* Send the TC error message only when ISR_TC is not set */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Disable RX DMA. */
	if (stm32_port->rx_ch)
		dmaengine_terminate_async(stm32_port->rx_ch);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}
1018 
/*
 * Apply new line settings (baud rate, word length, parity, stop bits,
 * flow control) along with the related irq, FIFO, DMA and RS485
 * configuration. The transmitter is drained first, then the USART is
 * fully disabled, reprogrammed and re-enabled under the port lock.
 */
static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	/* Ignore CRTSCTS unless hardware flow control is wired up */
	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	/* Max baud limited by 8x oversampling: uartclk / 8 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	/* Wait for any in-flight transmission to complete before disabling */
	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* Tx and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		/* ftcfg < 0 means the threshold irq is unused for that dir */
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = tty_get_char_size(cflag);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9) {
		cr1 |= USART_CR1_M0;
	} else if ((bits == 7) && cfg->has_7bits_data) {
		cr1 |= USART_CR1_M1;
	} else if (bits != 8) {
		/* Unsupported size: fall back to 8 data bits (CS8) */
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
			, bits);
		cflag &= ~CSIZE;
		cflag |= CS8;
		termios->c_cflag = cflag;
		bits = 8;
		if (cflag & PARENB) {
			bits++;
			cr1 |= USART_CR1_M0;
		}
	}

	/*
	 * Use the receiver timeout irq (when available) instead of RXNE when
	 * RX goes through DMA or a valid RX FIFO threshold is configured.
	 */
	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/*
		 * Enable fifo threshold irq in two cases, either when there is no DMA, or when
		 * wake up over usart, from low power until the DMA gets re-enabled by resume.
		 */
		stm32_port->cr3_irq =  USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	/* Program the baud rate register from the computed divider */
	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch) {
		/*
		 * Setup DMA to collect only valid data and enable error irqs.
		 * This also enables break reception when using DMA.
		 */
		cr1 |= USART_CR1_PEIE;
		cr3 |= USART_CR3_EIE;
		cr3 |= USART_CR3_DMAR;
		cr3 |= USART_CR3_DDRE;
	}

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);
		/* DEP sets driver-enable polarity; keep flags consistent */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Configure wake up from low power on start bit detection */
	if (stm32_port->wakeup_src) {
		cr3 &= ~USART_CR3_WUS_MASK;
		cr3 |= USART_CR3_WUS_START_BIT;
	}

	/* Write CR3/CR2 before CR1, which re-enables TE/RE */
	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	spin_unlock_irqrestore(&port->lock, flags);

	/* Handle modem control interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		stm32_usart_enable_ms(port);
	else
		stm32_usart_disable_ms(port);
}
1235 
1236 static const char *stm32_usart_type(struct uart_port *port)
1237 {
1238 	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1239 }
1240 
/* No resources to release: the region is managed by devm at probe time. */
static void stm32_usart_release_port(struct uart_port *port)
{
}
1244 
/* Nothing to request: the region was already claimed by devm at probe time. */
static int stm32_usart_request_port(struct uart_port *port)
{
	return 0;
}
1249 
1250 static void stm32_usart_config_port(struct uart_port *port, int flags)
1251 {
1252 	if (flags & UART_CONFIG_TYPE)
1253 		port->type = PORT_STM32;
1254 }
1255 
/* Reject any setserial-style reconfiguration attempt from userspace. */
static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}
1262 
1263 static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1264 			   unsigned int oldstate)
1265 {
1266 	struct stm32_port *stm32port = container_of(port,
1267 			struct stm32_port, port);
1268 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1269 	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1270 	unsigned long flags;
1271 
1272 	switch (state) {
1273 	case UART_PM_STATE_ON:
1274 		pm_runtime_get_sync(port->dev);
1275 		break;
1276 	case UART_PM_STATE_OFF:
1277 		spin_lock_irqsave(&port->lock, flags);
1278 		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1279 		spin_unlock_irqrestore(&port->lock, flags);
1280 		pm_runtime_put_sync(port->dev);
1281 		break;
1282 	}
1283 }
1284 
1285 #if defined(CONFIG_CONSOLE_POLL)
1286 
1287  /* Callbacks for characters polling in debug context (i.e. KGDB). */
/* Polling init (KGDB): make sure the USART clock is running. */
static int stm32_usart_poll_init(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	return clk_prepare_enable(stm32_port->clk);
}
1294 
/*
 * Polled (non-irq) read of one character, for debug contexts like KGDB.
 * Returns NO_POLL_CHAR when the RX register holds no data.
 */
static int stm32_usart_poll_get_char(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
		return NO_POLL_CHAR;

	/* Mask off bits beyond the configured word length */
	return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
}
1305 
/* Polled (non-irq) write of one character; reuses the console putchar. */
static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
{
	stm32_usart_console_putchar(port, ch);
}
1310 #endif /* CONFIG_CONSOLE_POLL */
1311 
/* serial core callbacks for this driver */
static const struct uart_ops stm32_uart_ops = {
	.tx_empty	= stm32_usart_tx_empty,
	.set_mctrl	= stm32_usart_set_mctrl,
	.get_mctrl	= stm32_usart_get_mctrl,
	.stop_tx	= stm32_usart_stop_tx,
	.start_tx	= stm32_usart_start_tx,
	.throttle	= stm32_usart_throttle,
	.unthrottle	= stm32_usart_unthrottle,
	.stop_rx	= stm32_usart_stop_rx,
	.enable_ms	= stm32_usart_enable_ms,
	.break_ctl	= stm32_usart_break_ctl,
	.startup	= stm32_usart_startup,
	.shutdown	= stm32_usart_shutdown,
	.flush_buffer	= stm32_usart_flush_buffer,
	.set_termios	= stm32_usart_set_termios,
	.pm		= stm32_usart_pm,
	.type		= stm32_usart_type,
	.release_port	= stm32_usart_release_port,
	.request_port	= stm32_usart_request_port,
	.config_port	= stm32_usart_config_port,
	.verify_port	= stm32_usart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_init      = stm32_usart_poll_init,
	.poll_get_char	= stm32_usart_poll_get_char,
	.poll_put_char	= stm32_usart_poll_put_char,
#endif /* CONFIG_CONSOLE_POLL */
};
1339 
1340 /*
1341  * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
1342  * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
1343  * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
1344  * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
1345  */
1346 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
1347 
1348 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
1349 				  int *ftcfg)
1350 {
1351 	u32 bytes, i;
1352 
1353 	/* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
1354 	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1355 		bytes = 8;
1356 
1357 	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
1358 		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
1359 			break;
1360 	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1361 		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1362 
1363 	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
1364 		stm32h7_usart_fifo_thresh_cfg[i]);
1365 
1366 	/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
1367 	if (i)
1368 		*ftcfg = i - 1;
1369 	else
1370 		*ftcfg = -EINVAL;
1371 }
1372 
/* Undo stm32_usart_init_port(): release the clock enabled there. */
static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}
1377 
/*
 * One-time port setup at probe: irq, RS485, DT options, register
 * mapping, clock and modem-control gpios. On success the port clock is
 * left enabled; stm32_usart_deinit_port() is the matching teardown.
 */
static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	port->iotype	= UPIO_MEM;
	port->flags	= UPF_BOOT_AUTOCONF;
	port->ops	= &stm32_uart_ops;
	port->dev	= &pdev->dev;
	port->fifosize	= stm32port->info->cfg.fifosize;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	/* Optional DT features, only honoured when the variant supports them */
	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, "rx-threshold",
				      &stm32port->rxftcfg);
		stm32_usart_get_ftcfg(pdev, "tx-threshold",
				      &stm32port->txftcfg);
	}

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
	 * properties should not be specified.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}
1464 
1465 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1466 {
1467 	struct device_node *np = pdev->dev.of_node;
1468 	int id;
1469 
1470 	if (!np)
1471 		return NULL;
1472 
1473 	id = of_alias_get_id(np, "serial");
1474 	if (id < 0) {
1475 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1476 		return NULL;
1477 	}
1478 
1479 	if (WARN_ON(id >= STM32_MAX_PORTS))
1480 		return NULL;
1481 
1482 	stm32_ports[id].hw_flow_control =
1483 		of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ ||
1484 		of_property_read_bool (np, "uart-has-rtscts");
1485 	stm32_ports[id].port.line = id;
1486 	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1487 	stm32_ports[id].cr3_irq = 0;
1488 	stm32_ports[id].last_res = RX_BUF_L;
1489 	return &stm32_ports[id];
1490 }
1491 
#ifdef CONFIG_OF
/* Per-compatible variant data: register offsets and feature flags */
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif
1502 
1503 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
1504 					 struct platform_device *pdev)
1505 {
1506 	if (stm32port->rx_buf)
1507 		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
1508 				  stm32port->rx_dma_buf);
1509 }
1510 
/*
 * Allocate the coherent RX DMA buffer and configure the RX channel to
 * read single bytes from the RDR register. Refuses DMA for the console
 * port. Returns 0 on success or a negative errno.
 */
static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	/*
	 * Using DMA and threaded handler for the console could lead to
	 * deadlocks.
	 */
	if (uart_console(port))
		return -ENODEV;

	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		/* Release the buffer allocated above before bailing out */
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}
1547 
1548 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
1549 					 struct platform_device *pdev)
1550 {
1551 	if (stm32port->tx_buf)
1552 		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
1553 				  stm32port->tx_dma_buf);
1554 }
1555 
/*
 * Allocate the coherent TX DMA buffer and configure the TX channel to
 * write single bytes into the TDR register. Returns 0 on success or a
 * negative errno.
 */
static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		/* Release the buffer allocated above before bailing out */
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}
1585 
/*
 * Platform probe: bind a stm32_port slot, initialize hardware, set up
 * optional wake-up irq and DMA channels (falling back to interrupt mode
 * on any non-deferral DMA error), enable runtime PM and register the
 * uart port. Error paths unwind in strict reverse order of acquisition.
 */
static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		return ret;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_wakeirq;
	}
	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	/* Keep the device active until uart_add_one_port() completes */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		dma_release_channel(stm32port->tx_ch);
	}

	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

err_wakeirq:
	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

	return ret;
}
1688 
1689 static int stm32_usart_serial_remove(struct platform_device *pdev)
1690 {
1691 	struct uart_port *port = platform_get_drvdata(pdev);
1692 	struct stm32_port *stm32_port = to_stm32_port(port);
1693 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1694 	int err;
1695 	u32 cr3;
1696 
1697 	pm_runtime_get_sync(&pdev->dev);
1698 	err = uart_remove_one_port(&stm32_usart_driver, port);
1699 	if (err)
1700 		return(err);
1701 
1702 	pm_runtime_disable(&pdev->dev);
1703 	pm_runtime_set_suspended(&pdev->dev);
1704 	pm_runtime_put_noidle(&pdev->dev);
1705 
1706 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
1707 	cr3 = readl_relaxed(port->membase + ofs->cr3);
1708 	cr3 &= ~USART_CR3_EIE;
1709 	cr3 &= ~USART_CR3_DMAR;
1710 	cr3 &= ~USART_CR3_DDRE;
1711 	writel_relaxed(cr3, port->membase + ofs->cr3);
1712 
1713 	if (stm32_port->tx_ch) {
1714 		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
1715 		dma_release_channel(stm32_port->tx_ch);
1716 	}
1717 
1718 	if (stm32_port->rx_ch) {
1719 		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
1720 		dma_release_channel(stm32_port->rx_ch);
1721 	}
1722 
1723 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1724 
1725 	if (stm32_port->wakeup_src) {
1726 		dev_pm_clear_wake_irq(&pdev->dev);
1727 		device_init_wakeup(&pdev->dev, false);
1728 	}
1729 
1730 	stm32_usart_deinit_port(stm32_port);
1731 
1732 	return 0;
1733 }
1734 
/*
 * Busy-wait until the TX register is empty, then write one character.
 * Gives up (dropping the character) after STM32_USART_TIMEOUT_USEC.
 */
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 isr;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
						(isr & USART_SR_TXE), 100,
						STM32_USART_TIMEOUT_USEC);
	if (ret != 0) {
		dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
		return;
	}
	writel_relaxed(ch, port->membase + ofs->tdr);
}
1751 
1752 #ifdef CONFIG_SERIAL_STM32_CONSOLE
/*
 * Console write callback: emit @cnt bytes with interrupts masked in
 * CR1 and the transmitter forced on, then restore the previous CR1.
 * During an oops only a trylock is attempted so printing can proceed
 * even when the port lock is already held.
 */
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |=  USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}
1783 
/*
 * Console setup callback: validate the requested index, make sure the
 * port has already been probed and mapped, then apply either the
 * "console=" options or the 9600 8N1 defaults.
 */
static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}
1811 
/* Console descriptor registered through stm32_usart_driver.cons */
static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};
1821 
1822 #define STM32_SERIAL_CONSOLE (&stm32_console)
1823 
1824 #else
1825 #define STM32_SERIAL_CONSOLE NULL
1826 #endif /* CONFIG_SERIAL_STM32_CONSOLE */
1827 
1828 #ifdef CONFIG_SERIAL_EARLYCON
/* Earlycon putchar: spin until TXE, then write the byte to TDR. */
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}
1838 
/* Earlycon write callback: emit the buffer one polled byte at a time. */
static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}
1846 
/* Earlycon setup for STM32H7: select the H7 register layout. */
static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}
1855 
/* Earlycon setup for STM32F7: select the F7 register layout. */
static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}
1864 
/* Earlycon setup for STM32F4: select the F4 register layout. */
static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}
1873 
1874 OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
1875 OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
1876 OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
1877 #endif /* CONFIG_SERIAL_EARLYCON */
1878 
/* uart_driver descriptor shared by all STM32 ports (major/minor dynamic) */
static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};
1887 
/*
 * Enable (@enable true) or disable low-power wake-up for the port.
 * When enabling, RX DMA must be stopped first and any data already
 * received is drained to the tty layer; when disabling, cyclic RX DMA
 * is restarted. No-op when the port is not a wake-up source or the tty
 * port is not initialized.
 */
static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
						       bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Avoid race with RX IRQ when DMAR is cleared */
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Poll data from DMA RX buffer if any */
			size = stm32_usart_receive_chars(port, true);
			dmaengine_terminate_async(stm32_port->rx_ch);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_start_rx_dma_cyclic(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}
1942 
/*
 * System suspend: suspend the uart port, arm low-power wake-up when the
 * device may wake the system, and select the matching pinctrl state.
 */
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}
1971 
1972 static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
1973 {
1974 	struct uart_port *port = dev_get_drvdata(dev);
1975 	int ret;
1976 
1977 	pinctrl_pm_select_default_state(dev);
1978 
1979 	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
1980 		ret = stm32_usart_serial_en_wakeup(port, false);
1981 		if (ret)
1982 			return ret;
1983 	}
1984 
1985 	return uart_resume_port(&stm32_usart_driver, port);
1986 }
1987 
1988 static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
1989 {
1990 	struct uart_port *port = dev_get_drvdata(dev);
1991 	struct stm32_port *stm32port = container_of(port,
1992 			struct stm32_port, port);
1993 
1994 	clk_disable_unprepare(stm32port->clk);
1995 
1996 	return 0;
1997 }
1998 
1999 static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
2000 {
2001 	struct uart_port *port = dev_get_drvdata(dev);
2002 	struct stm32_port *stm32port = container_of(port,
2003 			struct stm32_port, port);
2004 
2005 	return clk_prepare_enable(stm32port->clk);
2006 }
2007 
/*
 * PM callbacks: runtime PM gates the USART clock, system sleep goes through
 * the serial suspend/resume handlers above.
 */
static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};
2014 
/* Platform driver glue; matched against DT via stm32_match (when CONFIG_OF). */
static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};
2024 
2025 static int __init stm32_usart_init(void)
2026 {
2027 	static char banner[] __initdata = "STM32 USART driver initialized";
2028 	int ret;
2029 
2030 	pr_info("%s\n", banner);
2031 
2032 	ret = uart_register_driver(&stm32_usart_driver);
2033 	if (ret)
2034 		return ret;
2035 
2036 	ret = platform_driver_register(&stm32_serial_driver);
2037 	if (ret)
2038 		uart_unregister_driver(&stm32_usart_driver);
2039 
2040 	return ret;
2041 }
2042 
/* Module exit: unregister in reverse order of registration. */
static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}
2048 
module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

/* Allow module autoloading when a matching platform device is created. */
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");
2055