xref: /openbmc/linux/drivers/tty/serial/stm32-usart.c (revision 8e74a48d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Maxime Coquelin 2015
4  * Copyright (C) STMicroelectronics SA 2017
5  * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
6  *	     Gerald Baeza <gerald.baeza@foss.st.com>
7  *	     Erwan Le Ray <erwan.leray@foss.st.com>
8  *
9  * Inspired by st-asc.c from STMicroelectronics (c)
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/console.h>
14 #include <linux/delay.h>
15 #include <linux/dma-direction.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_platform.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/serial_core.h>
29 #include <linux/serial.h>
30 #include <linux/spinlock.h>
31 #include <linux/sysrq.h>
32 #include <linux/tty_flip.h>
33 #include <linux/tty.h>
34 
35 #include "serial_mctrl_gpio.h"
36 #include "stm32-usart.h"
37 
38 static void stm32_usart_stop_tx(struct uart_port *port);
39 static void stm32_usart_transmit_chars(struct uart_port *port);
40 
/* Retrieve the driver-private stm32_port embedding the given uart_port. */
static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	return container_of(port, struct stm32_port, port);
}
45 
46 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
47 {
48 	u32 val;
49 
50 	val = readl_relaxed(port->membase + reg);
51 	val |= bits;
52 	writel_relaxed(val, port->membase + reg);
53 }
54 
55 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
56 {
57 	u32 val;
58 
59 	val = readl_relaxed(port->membase + reg);
60 	val &= ~bits;
61 	writel_relaxed(val, port->membase + reg);
62 }
63 
64 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
65 					 u32 delay_DDE, u32 baud)
66 {
67 	u32 rs485_deat_dedt;
68 	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
69 	bool over8;
70 
71 	*cr3 |= USART_CR3_DEM;
72 	over8 = *cr1 & USART_CR1_OVER8;
73 
74 	if (over8)
75 		rs485_deat_dedt = delay_ADE * baud * 8;
76 	else
77 		rs485_deat_dedt = delay_ADE * baud * 16;
78 
79 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
80 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
81 			  rs485_deat_dedt_max : rs485_deat_dedt;
82 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
83 			   USART_CR1_DEAT_MASK;
84 	*cr1 |= rs485_deat_dedt;
85 
86 	if (over8)
87 		rs485_deat_dedt = delay_DDE * baud * 8;
88 	else
89 		rs485_deat_dedt = delay_DDE * baud * 16;
90 
91 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
92 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
93 			  rs485_deat_dedt_max : rs485_deat_dedt;
94 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
95 			   USART_CR1_DEDT_MASK;
96 	*cr1 |= rs485_deat_dedt;
97 }
98 
/*
 * Apply an RS485 configuration: program DE mode, DE polarity and the
 * DEAT/DEDT timings, or tear them all down when RS485 is disabled.
 * Always returns 0.
 */
static int stm32_usart_config_rs485(struct uart_port *port,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	/* UART must be disabled while the DE fields in CR1/CR3 change. */
	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	/*
	 * NOTE(review): port->rs485 is snapshot BEFORE the flag fix-ups
	 * below, so only the caller's copy sees the adjusted flags —
	 * confirm this ordering is intentional.
	 */
	port->rs485 = *rs485conf;

	/* This hardware keeps the receiver active during transmission. */
	rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		/* Recover the current baud rate from BRR to size the DE delays. */
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		/* In OVER8 mode the low fraction bits of BRR are right-shifted. */
		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		/* DE polarity follows the RTS_ON_SEND/RTS_AFTER_SEND choice. */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		/* RS485 disabled: clear DE mode, polarity and both delays. */
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Re-enable the UART now that the configuration is committed. */
	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	return 0;
}
152 
153 static int stm32_usart_init_rs485(struct uart_port *port,
154 				  struct platform_device *pdev)
155 {
156 	struct serial_rs485 *rs485conf = &port->rs485;
157 
158 	rs485conf->flags = 0;
159 	rs485conf->delay_rts_before_send = 0;
160 	rs485conf->delay_rts_after_send = 0;
161 
162 	if (!pdev->dev.of_node)
163 		return -ENODEV;
164 
165 	return uart_get_rs485_mode(port);
166 }
167 
168 static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
169 {
170 	struct stm32_port *stm32_port = to_stm32_port(port);
171 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
172 
173 	if (!stm32_port->rx_ch)
174 		return false;
175 
176 	return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
177 }
178 
179 /* Return true when data is pending (in pio mode), and false when no data is pending. */
180 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
181 {
182 	struct stm32_port *stm32_port = to_stm32_port(port);
183 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
184 
185 	*sr = readl_relaxed(port->membase + ofs->isr);
186 	/* Get pending characters in RDR or FIFO */
187 	if (*sr & USART_SR_RXNE) {
188 		/* Get all pending characters from the RDR or the FIFO when using interrupts */
189 		if (!stm32_usart_rx_dma_enabled(port))
190 			return true;
191 
192 		/* Handle only RX data errors when using DMA */
193 		if (*sr & USART_SR_ERR_MASK)
194 			return true;
195 	}
196 
197 	return false;
198 }
199 
200 static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
201 {
202 	struct stm32_port *stm32_port = to_stm32_port(port);
203 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
204 	unsigned long c;
205 
206 	c = readl_relaxed(port->membase + ofs->rdr);
207 	/* Apply RDR data mask */
208 	c &= stm32_port->rdr_mask;
209 
210 	return c;
211 }
212 
/*
 * Drain pending RX characters in PIO mode, classifying errors (overrun,
 * parity, framing, break) and handing each character to the tty layer.
 * Returns the number of characters consumed. Caller holds the port lock.
 */
static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	unsigned int size = 0;
	u32 sr;
	char flag;

	while (stm32_usart_pending_rx_pio(port, &sr)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits has to be cleared before reading the RDR:
		 * In FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignement between RDR and SR,
		 * and clear status bits of the next rx data.
		 *
		 * Clear errors flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char_pio(port);
		port->icount.rx++;
		size++;
		if (sr & USART_SR_ERR_MASK) {
			/* Account the error; only the first matching kind counts. */
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			/* Only report errors the user asked to see. */
			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		/* Swallow the character when it completes a sysrq sequence. */
		if (uart_prepare_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	return size;
}
279 
280 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
281 {
282 	struct stm32_port *stm32_port = to_stm32_port(port);
283 	struct tty_port *ttyport = &stm32_port->port.state->port;
284 	unsigned char *dma_start;
285 	int dma_count, i;
286 
287 	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
288 
289 	/*
290 	 * Apply rdr_mask on buffer in order to mask parity bit.
291 	 * This loop is useless in cs8 mode because DMA copies only
292 	 * 8 bits and already ignores parity bit.
293 	 */
294 	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
295 		for (i = 0; i < dma_size; i++)
296 			*(dma_start + i) &= stm32_port->rdr_mask;
297 
298 	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
299 	port->icount.rx += dma_count;
300 	if (dma_count != dma_size)
301 		port->icount.buf_overrun++;
302 	stm32_port->last_res -= dma_count;
303 	if (stm32_port->last_res == 0)
304 		stm32_port->last_res = RX_BUF_L;
305 }
306 
/*
 * Flush everything the cyclic RX DMA has written since the last call.
 * Both last_res and the engine's residue count bytes remaining before
 * the end of the buffer, so residue > last_res means the DMA has wrapped
 * and the data must be pushed in two contiguous chunks.
 * Returns the total number of bytes pushed. Caller holds the port lock.
 */
static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int dma_size, size = 0;

	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
		/* Conditional first part: from last_res to end of DMA buffer */
		dma_size = stm32_port->last_res;
		stm32_usart_push_buffer_dma(port, dma_size);
		size = dma_size;
	}

	/* Second part: from the (possibly wrapped) position up to residue. */
	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
	stm32_usart_push_buffer_dma(port, dma_size);
	size += dma_size;

	return size;
}
326 
/*
 * Top-level RX path: drain characters via DMA when it is running, via
 * PIO otherwise. @force_dma_flush forces a DMA flush even when the DMA
 * request line is currently masked. On RX errors the DMA request line
 * is briefly masked so the error bytes can be handled in PIO mode; on a
 * DMA engine failure the driver permanently falls back to interrupt
 * mode. Returns the number of characters received. Caller holds the
 * port lock.
 */
static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status rx_dma_status;
	u32 sr;
	unsigned int size = 0;

	if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    &stm32_port->rx_dma_state);
		if (rx_dma_status == DMA_IN_PROGRESS) {
			/* Empty DMA buffer */
			size = stm32_usart_receive_chars_dma(port);
			sr = readl_relaxed(port->membase + ofs->isr);
			if (sr & USART_SR_ERR_MASK) {
				/* Disable DMA request line */
				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

				/* Switch to PIO mode to handle the errors */
				size += stm32_usart_receive_chars_pio(port);

				/* Switch back to DMA mode */
				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
			}
		} else {
			/* Disable RX DMA */
			dmaengine_terminate_async(stm32_port->rx_ch);
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Fall back to interrupt mode */
			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
			size = stm32_usart_receive_chars_pio(port);
		}
	} else {
		size = stm32_usart_receive_chars_pio(port);
	}

	return size;
}
367 
/* Abort the in-flight TX DMA transaction and mark the channel idle. */
static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
	dmaengine_terminate_async(stm32_port->tx_ch);
	stm32_port->tx_dma_busy = false;
}
373 
/* True while a TX DMA transaction is in flight (set in transmit_chars_dma). */
static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{
	/*
	 * We cannot use the function "dmaengine_tx_status" to know the
	 * status of DMA. This function does not show if the "dma complete"
	 * callback of the DMA transaction has been called. So we prefer
	 * to use "tx_dma_busy" flag to prevent dual DMA transaction at the
	 * same time.
	 */
	return stm32_port->tx_dma_busy;
}
385 
386 static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
387 {
388 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
389 
390 	return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT);
391 }
392 
/*
 * dmaengine completion callback for TX: mask the DMA request line,
 * release the channel state, then restart transmission for any data
 * queued while the DMA transfer was running.
 */
static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned long flags;

	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32_usart_tx_dma_terminate(stm32port);

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}
408 
/* Unmask the interrupt that signals room for more TX data. */
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enables TX FIFO threshold irq when FIFO is enabled,
	 * or TX empty irq when FIFO is disabled
	 */
	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
423 
/*
 * dmaengine completion callback for the cyclic RX transfer: drain the
 * DMA buffer into the tty layer and push it to the line discipline.
 */
static void stm32_usart_rx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct tty_port *tport = &port->state->port;
	unsigned int size;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	size = stm32_usart_receive_chars(port, false);
	/* Unlock first: tty_flip_buffer_push() must run without the port lock. */
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
	if (size)
		tty_flip_buffer_push(tport);
}
437 
438 static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
439 {
440 	struct stm32_port *stm32_port = to_stm32_port(port);
441 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
442 
443 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
444 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
445 	else
446 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
447 }
448 
/*
 * Transmit as much of the circular buffer as the TDR/FIFO will accept,
 * in PIO mode. Masks the TX DMA request first so PIO and DMA do not
 * race on the TDR. Caller holds the port lock.
 */
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}
473 
/*
 * Transmit via DMA: copy up to TX_BUF_L bytes from the (possibly
 * wrapping) circular buffer into the bounce buffer, submit a single
 * MEM_TO_DEV transfer, then enable the DMA request line. Falls back to
 * PIO when descriptor preparation or submission fails. Caller holds
 * the port lock.
 */
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count;

	/* A transaction is still in flight: just make sure DMAT is enabled. */
	if (stm32_usart_tx_dma_started(stm32port)) {
		if (!stm32_usart_tx_dma_enabled(stm32port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		/* Pending data is contiguous: one copy suffices. */
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		/* Pending data wraps: copy tail..end, then start..head. */
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	/*
	 * Set "tx_dma_busy" flag. This flag will be released when
	 * dmaengine_terminate_async will be called. This flag helps
	 * transmit_chars_dma not to start another DMA transaction
	 * if the callback of the previous is not yet called.
	 */
	stm32port->tx_dma_busy = true;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	if (dma_submit_error(dmaengine_submit(desc))) {
		/* dma no yet started, safe to free resources */
		stm32_usart_tx_dma_terminate(stm32port);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	/* Consume the copied bytes only after the transfer is queued. */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	return;

fallback_err:
	stm32_usart_transmit_chars_pio(port);
}
547 
/*
 * Transmit entry point: send a pending x_char first (pausing DMA around
 * the direct TDR write), then push the circular buffer via DMA or PIO.
 * Wakes writers when the buffer drains below WAKEUP_CHARS and masks the
 * TX interrupt once empty. Caller holds the port lock.
 */
static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (port->x_char) {
		/* Pause the DMA request line so the x_char is not interleaved. */
		if (stm32_usart_tx_dma_started(stm32_port) &&
		    stm32_usart_tx_dma_enabled(stm32_port))
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		if (stm32_usart_tx_dma_started(stm32_port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	/* Clear transmission-complete before queuing new data. */
	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
}
587 
/*
 * Hard interrupt handler: acknowledges receiver-timeout and wakeup
 * flags, services RX (PIO, or error handling when DMA is active) and
 * TX-in-PIO, then defers to the threaded handler when RX DMA is on.
 */
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;
	unsigned int size;

	sr = readl_relaxed(port->membase + ofs->isr);

	/* Acknowledge receiver timeout so the irq does not re-fire. */
	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	/*
	 * rx errors in dma mode has to be handled ASAP to avoid overrun as the DMA request
	 * line has been masked by HW and rx data are stacking in FIFO.
	 */
	if (!stm32_port->throttled) {
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) {
			spin_lock(&port->lock);
			size = stm32_usart_receive_chars(port, false);
			uart_unlock_and_check_sysrq(port);
			if (size)
				tty_flip_buffer_push(tport);
		}
	}

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	/* RX DMA drain (receiver timeout) runs in the threaded handler. */
	if (stm32_usart_rx_dma_enabled(port))
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}
638 
/*
 * Threaded half of the interrupt: drains the RX DMA buffer after a
 * receiver-timeout, unless the port is throttled (unthrottle will
 * restart the flow).
 */
static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int size;
	unsigned long flags;

	/* Receiver timeout irq for DMA RX */
	if (!stm32_port->throttled) {
		spin_lock_irqsave(&port->lock, flags);
		size = stm32_usart_receive_chars(port, false);
		uart_unlock_and_check_sysrq_irqrestore(port, flags);
		if (size)
			tty_flip_buffer_push(tport);
	}

	return IRQ_HANDLED;
}
658 
659 static unsigned int stm32_usart_tx_empty(struct uart_port *port)
660 {
661 	struct stm32_port *stm32_port = to_stm32_port(port);
662 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
663 
664 	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
665 		return TIOCSER_TEMT;
666 
667 	return 0;
668 }
669 
670 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
671 {
672 	struct stm32_port *stm32_port = to_stm32_port(port);
673 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
674 
675 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
676 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
677 	else
678 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
679 
680 	mctrl_gpio_set(stm32_port->gpios, mctrl);
681 }
682 
683 static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
684 {
685 	struct stm32_port *stm32_port = to_stm32_port(port);
686 	unsigned int ret;
687 
688 	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
689 	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
690 
691 	return mctrl_gpio_get(stm32_port->gpios, &ret);
692 }
693 
694 static void stm32_usart_enable_ms(struct uart_port *port)
695 {
696 	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
697 }
698 
699 static void stm32_usart_disable_ms(struct uart_port *port)
700 {
701 	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
702 }
703 
/*
 * Transmit stop: mask the TX interrupt and DMA request, and in RS485
 * mode de-assert the GPIO-driven RTS/DE line (polarity depends on
 * RTS_ON_SEND).
 */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_tx_interrupt_disable(port);
	if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (rs485conf->flags & SER_RS485_ENABLED) {
		/* Return the RTS GPIO to its idle (not-sending) level. */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl & ~TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl | TIOCM_RTS);
		}
	}
}
725 
/*
 * There are probably characters waiting to be transmitted: in RS485
 * mode drive the GPIO RTS/DE line to its sending level, then kick the
 * transmit path. Caller holds the port lock.
 */
static void stm32_usart_start_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit))
		return;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		/* Assert the RTS GPIO with the configured sending polarity. */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl | TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl & ~TIOCM_RTS);
		}
	}

	stm32_usart_transmit_chars(port);
}
748 
749 /* Flush the transmit buffer. */
750 static void stm32_usart_flush_buffer(struct uart_port *port)
751 {
752 	struct stm32_port *stm32_port = to_stm32_port(port);
753 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
754 
755 	if (stm32_port->tx_ch) {
756 		stm32_usart_tx_dma_terminate(stm32_port);
757 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
758 	}
759 }
760 
/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
	 * Hardware flow control is triggered when RX FIFO is full.
	 */
	if (stm32_usart_rx_dma_enabled(port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	/* Mask the RX interrupts so no further data is delivered. */
	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	/* The irq and threaded handlers skip RX while this flag is set. */
	stm32_port->throttled = true;
	spin_unlock_irqrestore(&port->lock, flags);
}
784 
/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* Re-enable the RX interrupts masked by throttle. */
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	/*
	 * Switch back to DMA mode (re-enable DMA request line).
	 * Hardware flow control is stopped when FIFO is not full any more.
	 */
	if (stm32_port->rx_ch)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_port->throttled = false;
	spin_unlock_irqrestore(&port->lock, flags);
}
807 
/* Receive stop: mask the RX DMA request line and the RX interrupts. */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Disable DMA request line. */
	if (stm32_port->rx_ch)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}
822 
/*
 * Handle breaks - ignored by us: break transmission is intentionally
 * not implemented by this driver.
 */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}
827 
/*
 * Set up and start the cyclic RX DMA transfer over the whole RX buffer,
 * then enable the DMA request line (unless the port is throttled).
 * Returns 0 on success or a negative errno on failure.
 */
static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Fresh transfer: the full buffer lies ahead of the write pointer. */
	stm32_port->last_res = RX_BUF_L;
	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
					 stm32_port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "rx dma prep cyclic failed\n");
		return -ENODEV;
	}

	desc->callback = stm32_usart_rx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32_port->rx_ch);
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32_port->rx_ch);

	/*
	 * DMA request line not re-enabled at resume when port is throttled.
	 * It will be re-enabled by unthrottle ops.
	 */
	if (!stm32_port->throttled)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	return 0;
}
869 
/*
 * uart_ops .startup: request the (threaded) interrupt, apply TX/RX pin
 * swap if configured, flush the RX FIFO, start cyclic RX DMA when a
 * channel is available, and finally enable the receiver and the UART.
 * Returns 0 on success or a negative errno.
 */
static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
				   stm32_usart_threaded_interrupt,
				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
				   name, port);
	if (ret)
		return ret;

	/* Swap TX/RX pins when the devicetree asked for it. */
	if (stm32_port->swap) {
		val = readl_relaxed(port->membase + ofs->cr2);
		val |= USART_CR2_SWAP;
		writel_relaxed(val, port->membase + ofs->cr2);
	}

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	if (stm32_port->rx_ch) {
		ret = stm32_usart_start_rx_dma_cyclic(port);
		if (ret) {
			/* Undo the irq request so startup leaves no residue. */
			free_irq(port->irq, port);
			return ret;
		}
	}

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}
910 
/*
 * uart_ops .shutdown: stop TX DMA, wait (bounded) for the last frame to
 * leave the shifter, stop RX DMA, flush both FIFOs, disable the UART
 * and release the interrupt.
 */
static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_usart_tx_dma_started(stm32_port))
		stm32_usart_tx_dma_terminate(stm32_port);

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	/* Build the CR1 bit set to clear at the end: TX/RX, irqs, enable. */
	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	/* Wait up to 100ms for transmission-complete before tearing down. */
	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	/* Send the TC error message only when ISR_TC is not set */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Disable RX DMA. */
	if (stm32_port->rx_ch)
		dmaengine_terminate_async(stm32_port->rx_ch);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}
955 
/*
 * stm32_usart_set_termios - apply new line settings to the USART.
 * @port: uart port being reconfigured
 * @termios: requested settings (baud, word size, parity, flow control, ...)
 * @old: previous settings, used as fallback by uart_get_baud_rate()
 *
 * Drains the transmitter (polls TC under the port lock), disables the USART,
 * rebuilds CR1/CR2/CR3 and BRR from scratch, then re-enables it.  RS485,
 * RX DMA, FIFO thresholds and low-power wakeup all influence the register
 * values computed here.
 */
static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	/* Ignore CRTSCTS unless hardware flow control was enabled in DT */
	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	/* Max baud is uartclk/8 (8x oversampling lower bound, see below) */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	/* Wait for any in-flight frame to finish before reprogramming */
	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* Tx and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	/* Keep only the threshold irq enables; thresholds are rebuilt below */
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		/* Negative ftcfg means threshold irq unused (see get_ftcfg) */
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = tty_get_char_size(cflag);
	/* Mask applied to RDR to strip bits beyond the data word */
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9)
		cr1 |= USART_CR1_M0;
	else if ((bits == 7) && cfg->has_7bits_data)
		cr1 |= USART_CR1_M1;
	else if (bits != 8)
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
			, bits);

	/* RX timeout irq used with DMA or FIFO threshold reception */
	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/*
		 * Enable fifo threshold irq in two cases, either when there is no DMA, or when
		 * wake up over usart, from low power until the DMA gets re-enabled by resume.
		 */
		stm32_port->cr3_irq =  USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	/* Program the baud rate divider (integer mantissa + fraction) */
	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch) {
		/*
		 * Setup DMA to collect only valid data and enable error irqs.
		 * This also enables break reception when using DMA.
		 */
		cr1 |= USART_CR1_PEIE;
		cr3 |= USART_CR3_EIE;
		cr3 |= USART_CR3_DMAR;
		cr3 |= USART_CR3_DDRE;
	}

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);
		/* DEP selects driver-enable polarity from RTS_ON_SEND */
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Configure wake up from low power on start bit detection */
	if (stm32_port->wakeup_src) {
		cr3 &= ~USART_CR3_WUS_MASK;
		cr3 |= USART_CR3_WUS_START_BIT;
	}

	/* Commit control registers, then re-enable the USART */
	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	spin_unlock_irqrestore(&port->lock, flags);

	/* Handle modem control interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		stm32_usart_enable_ms(port);
	else
		stm32_usart_disable_ms(port);
}
1163 
1164 static const char *stm32_usart_type(struct uart_port *port)
1165 {
1166 	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1167 }
1168 
/* No resources claimed in request_port, so nothing to release here. */
static void stm32_usart_release_port(struct uart_port *port)
{
}
1172 
/* Resources are managed at probe time (devm); always succeed. */
static int stm32_usart_request_port(struct uart_port *port)
{
	return 0;
}
1177 
1178 static void stm32_usart_config_port(struct uart_port *port, int flags)
1179 {
1180 	if (flags & UART_CONFIG_TYPE)
1181 		port->type = PORT_STM32;
1182 }
1183 
/* Reject any userspace TIOCSSERIAL reconfiguration attempt. */
static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}
1190 
1191 static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1192 			   unsigned int oldstate)
1193 {
1194 	struct stm32_port *stm32port = container_of(port,
1195 			struct stm32_port, port);
1196 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1197 	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1198 	unsigned long flags;
1199 
1200 	switch (state) {
1201 	case UART_PM_STATE_ON:
1202 		pm_runtime_get_sync(port->dev);
1203 		break;
1204 	case UART_PM_STATE_OFF:
1205 		spin_lock_irqsave(&port->lock, flags);
1206 		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1207 		spin_unlock_irqrestore(&port->lock, flags);
1208 		pm_runtime_put_sync(port->dev);
1209 		break;
1210 	}
1211 }
1212 
/* serial_core callback table for this driver */
static const struct uart_ops stm32_uart_ops = {
	.tx_empty	= stm32_usart_tx_empty,
	.set_mctrl	= stm32_usart_set_mctrl,
	.get_mctrl	= stm32_usart_get_mctrl,
	.stop_tx	= stm32_usart_stop_tx,
	.start_tx	= stm32_usart_start_tx,
	.throttle	= stm32_usart_throttle,
	.unthrottle	= stm32_usart_unthrottle,
	.stop_rx	= stm32_usart_stop_rx,
	.enable_ms	= stm32_usart_enable_ms,
	.break_ctl	= stm32_usart_break_ctl,
	.startup	= stm32_usart_startup,
	.shutdown	= stm32_usart_shutdown,
	.flush_buffer	= stm32_usart_flush_buffer,
	.set_termios	= stm32_usart_set_termios,
	.pm		= stm32_usart_pm,
	.type		= stm32_usart_type,
	.release_port	= stm32_usart_release_port,
	.request_port	= stm32_usart_request_port,
	.config_port	= stm32_usart_config_port,
	.verify_port	= stm32_usart_verify_port,
};
1235 
1236 /*
1237  * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
1238  * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
1239  * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
1240  * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
1241  */
1242 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
1243 
1244 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
1245 				  int *ftcfg)
1246 {
1247 	u32 bytes, i;
1248 
1249 	/* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
1250 	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1251 		bytes = 8;
1252 
1253 	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
1254 		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
1255 			break;
1256 	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1257 		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1258 
1259 	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
1260 		stm32h7_usart_fifo_thresh_cfg[i]);
1261 
1262 	/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
1263 	if (i)
1264 		*ftcfg = i - 1;
1265 	else
1266 		*ftcfg = -EINVAL;
1267 }
1268 
/* Undo stm32_usart_init_port(): release the clock reference it took. */
static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}
1273 
/*
 * stm32_usart_init_port - probe-time initialization of one uart_port.
 * @stm32port: driver port state to fill in
 * @pdev: matching platform device
 *
 * Gathers irq, registers, clock, DT options (wakeup, rx/tx swap, FIFO
 * thresholds) and modem-control gpios.  On success the port clock is left
 * enabled (balanced later by stm32_usart_deinit_port()).
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	port->iotype	= UPIO_MEM;
	port->flags	= UPF_BOOT_AUTOCONF;
	port->ops	= &stm32_uart_ops;
	port->dev	= &pdev->dev;
	port->fifosize	= stm32port->info->cfg.fifosize;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	/* DT options gated on the per-variant feature flags */
	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, "rx-threshold",
				      &stm32port->rxftcfg);
		stm32_usart_get_ftcfg(pdev, "tx-threshold",
				      &stm32port->txftcfg);
	}

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
	 * properties should not be specified.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	/* ret is 0 here (last set by clk_prepare_enable success) */
	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}
1360 
1361 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1362 {
1363 	struct device_node *np = pdev->dev.of_node;
1364 	int id;
1365 
1366 	if (!np)
1367 		return NULL;
1368 
1369 	id = of_alias_get_id(np, "serial");
1370 	if (id < 0) {
1371 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1372 		return NULL;
1373 	}
1374 
1375 	if (WARN_ON(id >= STM32_MAX_PORTS))
1376 		return NULL;
1377 
1378 	stm32_ports[id].hw_flow_control =
1379 		of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ ||
1380 		of_property_read_bool (np, "uart-has-rtscts");
1381 	stm32_ports[id].port.line = id;
1382 	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1383 	stm32_ports[id].cr3_irq = 0;
1384 	stm32_ports[id].last_res = RX_BUF_L;
1385 	return &stm32_ports[id];
1386 }
1387 
#ifdef CONFIG_OF
/* Per-compatible match data selects register layout and feature set */
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif
1398 
/* Free the coherent RX DMA buffer allocated by the rx probe, if any. */
static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->rx_buf)
		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
				  stm32port->rx_dma_buf);
}
1406 
/*
 * stm32_usart_of_dma_rx_probe - set up DMA reception for a port.
 * @stm32port: port state, rx_ch must already be requested
 * @pdev: owning platform device
 *
 * Allocates the coherent RX buffer and programs the slave config so the
 * DMA reads single bytes from the RDR register.
 *
 * Return: 0 on success, negative errno on failure (caller falls back to
 * interrupt mode).
 */
static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	/*
	 * Using DMA and threaded handler for the console could lead to
	 * deadlocks.
	 */
	if (uart_console(port))
		return -ENODEV;

	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel: byte-wide reads from the RX data register */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}
1443 
/* Free the coherent TX DMA buffer allocated by the tx probe, if any. */
static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->tx_buf)
		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
				  stm32port->tx_dma_buf);
}
1451 
1452 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
1453 				       struct platform_device *pdev)
1454 {
1455 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1456 	struct uart_port *port = &stm32port->port;
1457 	struct device *dev = &pdev->dev;
1458 	struct dma_slave_config config;
1459 	int ret;
1460 
1461 	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
1462 					       &stm32port->tx_dma_buf,
1463 					       GFP_KERNEL);
1464 	if (!stm32port->tx_buf)
1465 		return -ENOMEM;
1466 
1467 	/* Configure DMA channel */
1468 	memset(&config, 0, sizeof(config));
1469 	config.dst_addr = port->mapbase + ofs->tdr;
1470 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1471 
1472 	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
1473 	if (ret < 0) {
1474 		dev_err(dev, "tx dma channel config failed\n");
1475 		stm32_usart_of_dma_tx_remove(stm32port, pdev);
1476 		return ret;
1477 	}
1478 
1479 	return 0;
1480 }
1481 
/*
 * stm32_usart_serial_probe - platform probe: bring up one USART port.
 *
 * Initializes the port, optionally wires the wake irq, requests RX/TX DMA
 * channels (falling back to interrupt mode for any non-deferral error),
 * enables runtime PM and registers the port with serial_core.  Error paths
 * unwind in reverse order via the labels at the bottom.
 *
 * Return: 0 on success, negative errno otherwise (-EPROBE_DEFER when a DMA
 * channel is not ready yet).
 */
static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		return ret;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_wakeirq;
	}
	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	/* Hold the device active across port registration */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		dma_release_channel(stm32port->tx_ch);
	}

	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

err_wakeirq:
	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

	return ret;
}
1584 
1585 static int stm32_usart_serial_remove(struct platform_device *pdev)
1586 {
1587 	struct uart_port *port = platform_get_drvdata(pdev);
1588 	struct stm32_port *stm32_port = to_stm32_port(port);
1589 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1590 	int err;
1591 	u32 cr3;
1592 
1593 	pm_runtime_get_sync(&pdev->dev);
1594 	err = uart_remove_one_port(&stm32_usart_driver, port);
1595 	if (err)
1596 		return(err);
1597 
1598 	pm_runtime_disable(&pdev->dev);
1599 	pm_runtime_set_suspended(&pdev->dev);
1600 	pm_runtime_put_noidle(&pdev->dev);
1601 
1602 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
1603 	cr3 = readl_relaxed(port->membase + ofs->cr3);
1604 	cr3 &= ~USART_CR3_EIE;
1605 	cr3 &= ~USART_CR3_DMAR;
1606 	cr3 &= ~USART_CR3_DDRE;
1607 	writel_relaxed(cr3, port->membase + ofs->cr3);
1608 
1609 	if (stm32_port->tx_ch) {
1610 		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
1611 		dma_release_channel(stm32_port->tx_ch);
1612 	}
1613 
1614 	if (stm32_port->rx_ch) {
1615 		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
1616 		dma_release_channel(stm32_port->rx_ch);
1617 	}
1618 
1619 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1620 
1621 	if (stm32_port->wakeup_src) {
1622 		dev_pm_clear_wake_irq(&pdev->dev);
1623 		device_init_wakeup(&pdev->dev, false);
1624 	}
1625 
1626 	stm32_usart_deinit_port(stm32_port);
1627 
1628 	return 0;
1629 }
1630 
1631 #ifdef CONFIG_SERIAL_STM32_CONSOLE
1632 static void stm32_usart_console_putchar(struct uart_port *port, int ch)
1633 {
1634 	struct stm32_port *stm32_port = to_stm32_port(port);
1635 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1636 
1637 	while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
1638 		cpu_relax();
1639 
1640 	writel_relaxed(ch, port->membase + ofs->tdr);
1641 }
1642 
/*
 * stm32_usart_console_write - emit a kernel console message on the USART.
 *
 * Takes the port lock (trylock only while oopsing, to avoid deadlock),
 * temporarily masks all USART interrupts and forces the transmitter on,
 * writes the string, then restores the saved CR1.
 */
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |=  USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	/* Only unlock if the trylock above actually succeeded */
	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}
1673 
/*
 * stm32_usart_console_setup - console init callback.
 * @co: console, index selects the port
 * @options: optional "baud parity bits flow" string from the command line
 *
 * Defaults to 9600 8N1 when no options are given.
 *
 * Return: 0 on success, -ENODEV/-ENXIO when the port is unavailable.
 */
static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}
1701 
static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* -1: selected via console= on the cmdline */
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
/* No console support compiled in */
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */
1717 
/* uart_driver shared by all STM32 ports; majors allocated dynamically (0) */
static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};
1726 
/*
 * stm32_usart_serial_en_wakeup - arm or disarm USART wakeup for system sleep.
 * @port: uart port
 * @enable: true when suspending (arm wakeup), false when resuming (disarm)
 *
 * No-op unless the port is a wakeup source and the tty port is initialized.
 * On suspend, RX DMA must be stopped (it cannot run in low-power mode) and
 * any data pending in the DMA buffer or RX FIFO is drained first.  On
 * resume, cyclic RX DMA is restarted before wakeup is disarmed.
 *
 * Return: 0 on success, negative errno if restarting RX DMA fails.
 */
static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
						       bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Avoid race with RX IRQ when DMAR is cleared */
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Poll data from DMA RX buffer if any */
			size = stm32_usart_receive_chars(port, true);
			dmaengine_terminate_async(stm32_port->rx_ch);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_start_rx_dma_cyclic(port);
			if (ret)
				return ret;
		}

		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}
1780 
/*
 * System-sleep suspend: suspend the port, arm wakeup when this device (or
 * its wakeup path) may wake the system, then select the matching pinctrl
 * state.
 */
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}
1809 
1810 static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
1811 {
1812 	struct uart_port *port = dev_get_drvdata(dev);
1813 	int ret;
1814 
1815 	pinctrl_pm_select_default_state(dev);
1816 
1817 	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
1818 		ret = stm32_usart_serial_en_wakeup(port, false);
1819 		if (ret)
1820 			return ret;
1821 	}
1822 
1823 	return uart_resume_port(&stm32_usart_driver, port);
1824 }
1825 
/* Runtime PM suspend: gate the port clock. */
static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}
1836 
/* Runtime PM resume: re-enable the port clock. */
static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}
1845 
/* Runtime PM (clock gating) + system sleep (wakeup/pinctrl) callbacks */
static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};
1852 
/* Platform driver glue; DT matching via stm32_match when CONFIG_OF */
static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};
1862 
1863 static int __init stm32_usart_init(void)
1864 {
1865 	static char banner[] __initdata = "STM32 USART driver initialized";
1866 	int ret;
1867 
1868 	pr_info("%s\n", banner);
1869 
1870 	ret = uart_register_driver(&stm32_usart_driver);
1871 	if (ret)
1872 		return ret;
1873 
1874 	ret = platform_driver_register(&stm32_serial_driver);
1875 	if (ret)
1876 		uart_unregister_driver(&stm32_usart_driver);
1877 
1878 	return ret;
1879 }
1880 
/* Module exit: unregister in reverse order of registration. */
static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}
1886 
/* Module entry/exit points and metadata */
module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");
1893