xref: /openbmc/linux/drivers/tty/serial/stm32-usart.c (revision a34a9f1a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Maxime Coquelin 2015
4  * Copyright (C) STMicroelectronics SA 2017
5  * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
6  *	     Gerald Baeza <gerald.baeza@foss.st.com>
7  *	     Erwan Le Ray <erwan.leray@foss.st.com>
8  *
9  * Inspired by st-asc.c from STMicroelectronics (c)
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/console.h>
14 #include <linux/delay.h>
15 #include <linux/dma-direction.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_platform.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/serial_core.h>
29 #include <linux/serial.h>
30 #include <linux/spinlock.h>
31 #include <linux/sysrq.h>
32 #include <linux/tty_flip.h>
33 #include <linux/tty.h>
34 
35 #include "serial_mctrl_gpio.h"
36 #include "stm32-usart.h"
37 
38 
39 /* Register offsets and per-compatible configuration */
40 static struct stm32_usart_info __maybe_unused stm32f4_info = {
41 	.ofs = {
42 		.isr	= 0x00,
43 		.rdr	= 0x04,
44 		.tdr	= 0x04,
45 		.brr	= 0x08,
46 		.cr1	= 0x0c,
47 		.cr2	= 0x10,
48 		.cr3	= 0x14,
49 		.gtpr	= 0x18,
50 		.rtor	= UNDEF_REG,
51 		.rqr	= UNDEF_REG,
52 		.icr	= UNDEF_REG,
53 	},
54 	.cfg = {
55 		.uart_enable_bit = 13,
56 		.has_7bits_data = false,
57 		.fifosize = 1,
58 	}
59 };
60 
61 static struct stm32_usart_info __maybe_unused stm32f7_info = {
62 	.ofs = {
63 		.cr1	= 0x00,
64 		.cr2	= 0x04,
65 		.cr3	= 0x08,
66 		.brr	= 0x0c,
67 		.gtpr	= 0x10,
68 		.rtor	= 0x14,
69 		.rqr	= 0x18,
70 		.isr	= 0x1c,
71 		.icr	= 0x20,
72 		.rdr	= 0x24,
73 		.tdr	= 0x28,
74 	},
75 	.cfg = {
76 		.uart_enable_bit = 0,
77 		.has_7bits_data = true,
78 		.has_swap = true,
79 		.fifosize = 1,
80 	}
81 };
82 
83 static struct stm32_usart_info __maybe_unused stm32h7_info = {
84 	.ofs = {
85 		.cr1	= 0x00,
86 		.cr2	= 0x04,
87 		.cr3	= 0x08,
88 		.brr	= 0x0c,
89 		.gtpr	= 0x10,
90 		.rtor	= 0x14,
91 		.rqr	= 0x18,
92 		.isr	= 0x1c,
93 		.icr	= 0x20,
94 		.rdr	= 0x24,
95 		.tdr	= 0x28,
96 	},
97 	.cfg = {
98 		.uart_enable_bit = 0,
99 		.has_7bits_data = true,
100 		.has_swap = true,
101 		.has_wakeup = true,
102 		.has_fifo = true,
103 		.fifosize = 16,
104 	}
105 };
106 
107 static void stm32_usart_stop_tx(struct uart_port *port);
108 static void stm32_usart_transmit_chars(struct uart_port *port);
109 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
110 
111 static inline struct stm32_port *to_stm32_port(struct uart_port *port)
112 {
113 	return container_of(port, struct stm32_port, port);
114 }
115 
116 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
117 {
118 	u32 val;
119 
120 	val = readl_relaxed(port->membase + reg);
121 	val |= bits;
122 	writel_relaxed(val, port->membase + reg);
123 }
124 
125 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
126 {
127 	u32 val;
128 
129 	val = readl_relaxed(port->membase + reg);
130 	val &= ~bits;
131 	writel_relaxed(val, port->membase + reg);
132 }
133 
134 static unsigned int stm32_usart_tx_empty(struct uart_port *port)
135 {
136 	struct stm32_port *stm32_port = to_stm32_port(port);
137 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
138 
139 	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
140 		return TIOCSER_TEMT;
141 
142 	return 0;
143 }
144 
145 static void stm32_usart_rs485_rts_enable(struct uart_port *port)
146 {
147 	struct stm32_port *stm32_port = to_stm32_port(port);
148 	struct serial_rs485 *rs485conf = &port->rs485;
149 
150 	if (stm32_port->hw_flow_control ||
151 	    !(rs485conf->flags & SER_RS485_ENABLED))
152 		return;
153 
154 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
155 		mctrl_gpio_set(stm32_port->gpios,
156 			       stm32_port->port.mctrl | TIOCM_RTS);
157 	} else {
158 		mctrl_gpio_set(stm32_port->gpios,
159 			       stm32_port->port.mctrl & ~TIOCM_RTS);
160 	}
161 }
162 
163 static void stm32_usart_rs485_rts_disable(struct uart_port *port)
164 {
165 	struct stm32_port *stm32_port = to_stm32_port(port);
166 	struct serial_rs485 *rs485conf = &port->rs485;
167 
168 	if (stm32_port->hw_flow_control ||
169 	    !(rs485conf->flags & SER_RS485_ENABLED))
170 		return;
171 
172 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
173 		mctrl_gpio_set(stm32_port->gpios,
174 			       stm32_port->port.mctrl & ~TIOCM_RTS);
175 	} else {
176 		mctrl_gpio_set(stm32_port->gpios,
177 			       stm32_port->port.mctrl | TIOCM_RTS);
178 	}
179 }
180 
181 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
182 					 u32 delay_DDE, u32 baud)
183 {
184 	u32 rs485_deat_dedt;
185 	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
186 	bool over8;
187 
188 	*cr3 |= USART_CR3_DEM;
189 	over8 = *cr1 & USART_CR1_OVER8;
190 
191 	*cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
192 
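	/*
	 * delay_ADE / delay_DDE are given in milliseconds while DEAT / DEDT
	 * are expressed in sample-time units (1/16 or 1/8 of a bit period,
	 * depending on OVER8), hence delay * baud * oversampling / 1000.
	 * The result is clamped to what the bitfield can hold: e.g. 1 ms at
	 * 115200 baud with 16x oversampling gives 1843, clamped to 31.
	 */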
193 	if (over8)
194 		rs485_deat_dedt = delay_ADE * baud * 8;
195 	else
196 		rs485_deat_dedt = delay_ADE * baud * 16;
197 
198 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
199 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
200 			  rs485_deat_dedt_max : rs485_deat_dedt;
201 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
202 			   USART_CR1_DEAT_MASK;
203 	*cr1 |= rs485_deat_dedt;
204 
205 	if (over8)
206 		rs485_deat_dedt = delay_DDE * baud * 8;
207 	else
208 		rs485_deat_dedt = delay_DDE * baud * 16;
209 
210 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
211 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
212 			  rs485_deat_dedt_max : rs485_deat_dedt;
213 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
214 			   USART_CR1_DEDT_MASK;
215 	*cr1 |= rs485_deat_dedt;
216 }
217 
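/*
 * Typical userspace usage (illustrative only, not part of this driver):
 *
 *	struct serial_rs485 rs485 = {
 *		.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
 *		.delay_rts_after_send = 1,	// milliseconds
 *	};
 *	ioctl(fd, TIOCSRS485, &rs485);
 *
 * The serial core sanitizes the request and then calls back into the
 * .rs485_config handler below.
 */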
218 static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
219 				    struct serial_rs485 *rs485conf)
220 {
221 	struct stm32_port *stm32_port = to_stm32_port(port);
222 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
223 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
224 	u32 usartdiv, baud, cr1, cr3;
225 	bool over8;
226 
227 	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
228 
229 	if (port->rs485_rx_during_tx_gpio)
230 		gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
231 					 !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
232 	else
233 		rs485conf->flags |= SER_RS485_RX_DURING_TX;
234 
235 	if (rs485conf->flags & SER_RS485_ENABLED) {
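		/*
		 * Recover the current baud rate from BRR so the driver enable
		 * assertion/de-assertion times (DEAT/DEDT) can be recomputed
		 * for the new RS485 delays.
		 */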
236 		cr1 = readl_relaxed(port->membase + ofs->cr1);
237 		cr3 = readl_relaxed(port->membase + ofs->cr3);
238 		usartdiv = readl_relaxed(port->membase + ofs->brr);
239 		usartdiv = usartdiv & GENMASK(15, 0);
240 		over8 = cr1 & USART_CR1_OVER8;
241 
242 		if (over8)
243 			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
244 				   << USART_BRR_04_R_SHIFT;
245 
246 		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
247 		stm32_usart_config_reg_rs485(&cr1, &cr3,
248 					     rs485conf->delay_rts_before_send,
249 					     rs485conf->delay_rts_after_send,
250 					     baud);
251 
252 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
253 			cr3 &= ~USART_CR3_DEP;
254 		else
255 			cr3 |= USART_CR3_DEP;
256 
257 		writel_relaxed(cr3, port->membase + ofs->cr3);
258 		writel_relaxed(cr1, port->membase + ofs->cr1);
259 	} else {
260 		stm32_usart_clr_bits(port, ofs->cr3,
261 				     USART_CR3_DEM | USART_CR3_DEP);
262 		stm32_usart_clr_bits(port, ofs->cr1,
263 				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
264 	}
265 
266 	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
267 
268 	/* Adjust RTS polarity when it is driven by software (GPIO) */
269 	if (stm32_usart_tx_empty(port))
270 		stm32_usart_rs485_rts_disable(port);
271 	else
272 		stm32_usart_rs485_rts_enable(port);
273 
274 	return 0;
275 }
276 
277 static int stm32_usart_init_rs485(struct uart_port *port,
278 				  struct platform_device *pdev)
279 {
280 	struct serial_rs485 *rs485conf = &port->rs485;
281 
282 	rs485conf->flags = 0;
283 	rs485conf->delay_rts_before_send = 0;
284 	rs485conf->delay_rts_after_send = 0;
285 
286 	if (!pdev->dev.of_node)
287 		return -ENODEV;
288 
289 	return uart_get_rs485_mode(port);
290 }
291 
292 static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
293 {
294 	return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
295 }
296 
297 static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
298 {
299 	dmaengine_terminate_async(stm32_port->rx_ch);
300 	stm32_port->rx_dma_busy = false;
301 }
302 
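/*
 * Generic DMA pause/resume helper: check that the channel is in the expected
 * state, then call the requested dmaengine operation. On failure the channel
 * is torn down through the provided terminate callback.
 */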
303 static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
304 					struct dma_chan *chan,
305 					enum dma_status expected_status,
306 					int dmaengine_pause_or_resume(struct dma_chan *),
307 					bool stm32_usart_xx_dma_started(struct stm32_port *),
308 					void stm32_usart_xx_dma_terminate(struct stm32_port *))
309 {
310 	struct uart_port *port = &stm32_port->port;
311 	enum dma_status dma_status;
312 	int ret;
313 
314 	if (!stm32_usart_xx_dma_started(stm32_port))
315 		return -EPERM;
316 
317 	dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
318 	if (dma_status != expected_status)
319 		return -EAGAIN;
320 
321 	ret = dmaengine_pause_or_resume(chan);
322 	if (ret) {
323 		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
324 		stm32_usart_xx_dma_terminate(stm32_port);
325 	}
326 	return ret;
327 }
328 
329 static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
330 {
331 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
332 					    DMA_IN_PROGRESS, dmaengine_pause,
333 					    stm32_usart_rx_dma_started,
334 					    stm32_usart_rx_dma_terminate);
335 }
336 
337 static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
338 {
339 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
340 					    DMA_PAUSED, dmaengine_resume,
341 					    stm32_usart_rx_dma_started,
342 					    stm32_usart_rx_dma_terminate);
343 }
344 
345 /* Return true when data is pending (in pio mode), and false when no data is pending. */
346 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
347 {
348 	struct stm32_port *stm32_port = to_stm32_port(port);
349 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
350 
351 	*sr = readl_relaxed(port->membase + ofs->isr);
352 	/* Get pending characters in RDR or FIFO */
353 	if (*sr & USART_SR_RXNE) {
354 		/* Get all pending characters from the RDR or the FIFO when using interrupts */
355 		if (!stm32_usart_rx_dma_started(stm32_port))
356 			return true;
357 
358 		/* Handle only RX data errors when using DMA */
359 		if (*sr & USART_SR_ERR_MASK)
360 			return true;
361 	}
362 
363 	return false;
364 }
365 
366 static u8 stm32_usart_get_char_pio(struct uart_port *port)
367 {
368 	struct stm32_port *stm32_port = to_stm32_port(port);
369 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
370 	unsigned long c;
371 
372 	c = readl_relaxed(port->membase + ofs->rdr);
373 	/* Apply RDR data mask */
374 	c &= stm32_port->rdr_mask;
375 
376 	return c;
377 }
378 
379 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
380 {
381 	struct stm32_port *stm32_port = to_stm32_port(port);
382 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
383 	unsigned int size = 0;
384 	u32 sr;
385 	u8 c, flag;
386 
387 	while (stm32_usart_pending_rx_pio(port, &sr)) {
388 		sr |= USART_SR_DUMMY_RX;
389 		flag = TTY_NORMAL;
390 
391 		/*
392 		 * Status bits have to be cleared before reading the RDR:
393 		 * in FIFO mode, reading the RDR will pop the next data
394 		 * (if any) along with its status bits into the SR.
395 		 * Not doing so leads to misalignment between RDR and SR,
396 		 * and clears the status bits of the next RX data.
397 		 *
398 		 * Clear errors flags for stm32f7 and stm32h7 compatible
399 		 * devices. On stm32f4 compatible devices, the error bit is
400 		 * cleared by the sequence [read SR - read DR].
401 		 */
402 		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
403 			writel_relaxed(sr & USART_SR_ERR_MASK,
404 				       port->membase + ofs->icr);
405 
406 		c = stm32_usart_get_char_pio(port);
407 		port->icount.rx++;
408 		size++;
409 		if (sr & USART_SR_ERR_MASK) {
410 			if (sr & USART_SR_ORE) {
411 				port->icount.overrun++;
412 			} else if (sr & USART_SR_PE) {
413 				port->icount.parity++;
414 			} else if (sr & USART_SR_FE) {
415 				/* Break detection if character is null */
416 				if (!c) {
417 					port->icount.brk++;
418 					if (uart_handle_break(port))
419 						continue;
420 				} else {
421 					port->icount.frame++;
422 				}
423 			}
424 
425 			sr &= port->read_status_mask;
426 
427 			if (sr & USART_SR_PE) {
428 				flag = TTY_PARITY;
429 			} else if (sr & USART_SR_FE) {
430 				if (!c)
431 					flag = TTY_BREAK;
432 				else
433 					flag = TTY_FRAME;
434 			}
435 		}
436 
437 		if (uart_prepare_sysrq_char(port, c))
438 			continue;
439 		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
440 	}
441 
442 	return size;
443 }
444 
445 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
446 {
447 	struct stm32_port *stm32_port = to_stm32_port(port);
448 	struct tty_port *ttyport = &stm32_port->port.state->port;
449 	unsigned char *dma_start;
450 	int dma_count, i;
451 
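	/*
	 * last_res is the DMA residue recorded after the previous push, so
	 * RX_BUF_L - last_res is the offset of the first byte that has not
	 * yet been pushed to the TTY layer.
	 */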
452 	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
453 
454 	/*
455 	 * Apply rdr_mask on the buffer in order to mask the parity bit.
456 	 * This loop is unnecessary in cs8 mode because the DMA copies only
457 	 * 8 bits and already ignores the parity bit.
458 	 */
459 	if (stm32_port->rdr_mask != (BIT(8) - 1))
460 		for (i = 0; i < dma_size; i++)
461 			*(dma_start + i) &= stm32_port->rdr_mask;
462 
463 	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
464 	port->icount.rx += dma_count;
465 	if (dma_count != dma_size)
466 		port->icount.buf_overrun++;
467 	stm32_port->last_res -= dma_count;
468 	if (stm32_port->last_res == 0)
469 		stm32_port->last_res = RX_BUF_L;
470 }
471 
472 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
473 {
474 	struct stm32_port *stm32_port = to_stm32_port(port);
475 	unsigned int dma_size, size = 0;
476 
477 	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
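	/*
	 * A residue larger than last_res means the DMA write pointer has
	 * wrapped past the end of the buffer since the previous push: flush
	 * the tail of the buffer first, then the data at the beginning.
	 */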
478 	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
479 		/* First part (wrap case): from last_res to the end of the DMA buffer */
480 		dma_size = stm32_port->last_res;
481 		stm32_usart_push_buffer_dma(port, dma_size);
482 		size = dma_size;
483 	}
484 
485 	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
486 	stm32_usart_push_buffer_dma(port, dma_size);
487 	size += dma_size;
488 
489 	return size;
490 }
491 
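/*
 * Drain received data into the TTY layer, either from the cyclic DMA buffer
 * or directly from the RDR/FIFO, and return the number of characters pushed.
 * On RX errors in DMA mode, reception temporarily drops to PIO so that the
 * per-character status flags can be reported.
 */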
492 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
493 {
494 	struct stm32_port *stm32_port = to_stm32_port(port);
495 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
496 	enum dma_status rx_dma_status;
497 	u32 sr;
498 	unsigned int size = 0;
499 
500 	if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
501 		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
502 						    stm32_port->rx_ch->cookie,
503 						    &stm32_port->rx_dma_state);
504 		if (rx_dma_status == DMA_IN_PROGRESS ||
505 		    rx_dma_status == DMA_PAUSED) {
506 			/* Empty DMA buffer */
507 			size = stm32_usart_receive_chars_dma(port);
508 			sr = readl_relaxed(port->membase + ofs->isr);
509 			if (sr & USART_SR_ERR_MASK) {
510 				/* Disable DMA request line */
511 				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
512 
513 				/* Switch to PIO mode to handle the errors */
514 				size += stm32_usart_receive_chars_pio(port);
515 
516 				/* Switch back to DMA mode */
517 				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
518 			}
519 		} else {
520 			/* Disable RX DMA */
521 			stm32_usart_rx_dma_terminate(stm32_port);
522 			/* Fall back to interrupt mode */
523 			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
524 			size = stm32_usart_receive_chars_pio(port);
525 		}
526 	} else {
527 		size = stm32_usart_receive_chars_pio(port);
528 	}
529 
530 	return size;
531 }
532 
533 static void stm32_usart_rx_dma_complete(void *arg)
534 {
535 	struct uart_port *port = arg;
536 	struct tty_port *tport = &port->state->port;
537 	unsigned int size;
538 	unsigned long flags;
539 
540 	spin_lock_irqsave(&port->lock, flags);
541 	size = stm32_usart_receive_chars(port, false);
542 	uart_unlock_and_check_sysrq_irqrestore(port, flags);
543 	if (size)
544 		tty_flip_buffer_push(tport);
545 }
546 
547 static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
548 {
549 	struct stm32_port *stm32_port = to_stm32_port(port);
550 	struct dma_async_tx_descriptor *desc;
551 	enum dma_status rx_dma_status;
552 	int ret;
553 
554 	if (stm32_port->throttled)
555 		return 0;
556 
557 	if (stm32_port->rx_dma_busy) {
558 		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
559 						    stm32_port->rx_ch->cookie,
560 						    NULL);
561 		if (rx_dma_status == DMA_IN_PROGRESS)
562 			return 0;
563 
564 		if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
565 			return 0;
566 
567 		dev_err(port->dev, "DMA failed: status error\n");
568 		stm32_usart_rx_dma_terminate(stm32_port);
569 	}
570 
571 	stm32_port->rx_dma_busy = true;
572 
573 	stm32_port->last_res = RX_BUF_L;
574 	/* Prepare a DMA cyclic transaction */
575 	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
576 					 stm32_port->rx_dma_buf,
577 					 RX_BUF_L, RX_BUF_P,
578 					 DMA_DEV_TO_MEM,
579 					 DMA_PREP_INTERRUPT);
580 	if (!desc) {
581 		dev_err(port->dev, "rx dma prep cyclic failed\n");
582 		stm32_port->rx_dma_busy = false;
583 		return -ENODEV;
584 	}
585 
586 	desc->callback = stm32_usart_rx_dma_complete;
587 	desc->callback_param = port;
588 
589 	/* Push current DMA transaction in the pending queue */
590 	ret = dma_submit_error(dmaengine_submit(desc));
591 	if (ret) {
592 		dmaengine_terminate_sync(stm32_port->rx_ch);
593 		stm32_port->rx_dma_busy = false;
594 		return ret;
595 	}
596 
597 	/* Issue pending DMA requests */
598 	dma_async_issue_pending(stm32_port->rx_ch);
599 
600 	return 0;
601 }
602 
603 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
604 {
605 	dmaengine_terminate_async(stm32_port->tx_ch);
606 	stm32_port->tx_dma_busy = false;
607 }
608 
609 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
610 {
611 	/*
612 	 * We cannot use dmaengine_tx_status() to know the state of the DMA:
613 	 * it does not tell whether the "dma complete" callback of the
614 	 * transaction has already run. So we rely on the "tx_dma_busy" flag
615 	 * to prevent two DMA transactions from being issued at the
616 	 * same time.
617 	 */
618 	return stm32_port->tx_dma_busy;
619 }
620 
621 static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
622 {
623 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
624 					    DMA_IN_PROGRESS, dmaengine_pause,
625 					    stm32_usart_tx_dma_started,
626 					    stm32_usart_tx_dma_terminate);
627 }
628 
629 static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
630 {
631 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
632 					    DMA_PAUSED, dmaengine_resume,
633 					    stm32_usart_tx_dma_started,
634 					    stm32_usart_tx_dma_terminate);
635 }
636 
637 static void stm32_usart_tx_dma_complete(void *arg)
638 {
639 	struct uart_port *port = arg;
640 	struct stm32_port *stm32port = to_stm32_port(port);
641 	unsigned long flags;
642 
643 	stm32_usart_tx_dma_terminate(stm32port);
644 
645 	/* Let's see if we have pending data to send */
646 	spin_lock_irqsave(&port->lock, flags);
647 	stm32_usart_transmit_chars(port);
648 	spin_unlock_irqrestore(&port->lock, flags);
649 }
650 
651 static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
652 {
653 	struct stm32_port *stm32_port = to_stm32_port(port);
654 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
655 
656 	/*
657 	 * Enable the TX FIFO threshold irq when the FIFO is enabled,
658 	 * or the TX empty irq when the FIFO is disabled.
659 	 */
660 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
661 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
662 	else
663 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
664 }
665 
666 static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
667 {
668 	struct stm32_port *stm32_port = to_stm32_port(port);
669 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
670 
671 	stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
672 }
673 
674 static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
675 {
676 	struct stm32_port *stm32_port = to_stm32_port(port);
677 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
678 
679 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
680 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
681 	else
682 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
683 }
684 
685 static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
686 {
687 	struct stm32_port *stm32_port = to_stm32_port(port);
688 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
689 
690 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
691 }
692 
693 static void stm32_usart_transmit_chars_pio(struct uart_port *port)
694 {
695 	struct stm32_port *stm32_port = to_stm32_port(port);
696 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
697 	struct circ_buf *xmit = &port->state->xmit;
698 
699 	while (!uart_circ_empty(xmit)) {
700 		/* Check that TDR is empty before filling FIFO */
701 		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
702 			break;
703 		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
704 		uart_xmit_advance(port, 1);
705 	}
706 
707 	/* Rely on the TXE irq, masked or unmasked below, for sending the remaining data */
708 	if (uart_circ_empty(xmit))
709 		stm32_usart_tx_interrupt_disable(port);
710 	else
711 		stm32_usart_tx_interrupt_enable(port);
712 }
713 
714 static void stm32_usart_transmit_chars_dma(struct uart_port *port)
715 {
716 	struct stm32_port *stm32port = to_stm32_port(port);
717 	struct circ_buf *xmit = &port->state->xmit;
718 	struct dma_async_tx_descriptor *desc = NULL;
719 	unsigned int count;
720 	int ret;
721 
722 	if (stm32_usart_tx_dma_started(stm32port)) {
723 		ret = stm32_usart_tx_dma_resume(stm32port);
724 		if (ret < 0 && ret != -EAGAIN)
725 			goto fallback_err;
726 		return;
727 	}
728 
729 	count = uart_circ_chars_pending(xmit);
730 
731 	if (count > TX_BUF_L)
732 		count = TX_BUF_L;
733 
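	/*
	 * Copy up to TX_BUF_L bytes from the circular xmit buffer into the
	 * DMA bounce buffer; the data may wrap around the end of xmit->buf,
	 * hence the two-part copy below.
	 */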
734 	if (xmit->tail < xmit->head) {
735 		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
736 	} else {
737 		size_t one = UART_XMIT_SIZE - xmit->tail;
738 		size_t two;
739 
740 		if (one > count)
741 			one = count;
742 		two = count - one;
743 
744 		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
745 		if (two)
746 			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
747 	}
748 
749 	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
750 					   stm32port->tx_dma_buf,
751 					   count,
752 					   DMA_MEM_TO_DEV,
753 					   DMA_PREP_INTERRUPT);
754 
755 	if (!desc)
756 		goto fallback_err;
757 
758 	/*
759 	 * Set the "tx_dma_busy" flag. It is cleared when
760 	 * dmaengine_terminate_async() is called, and prevents
761 	 * transmit_chars_dma() from starting another DMA transaction
762 	 * while the callback of the previous one has not run yet.
763 	 */
764 	stm32port->tx_dma_busy = true;
765 
766 	desc->callback = stm32_usart_tx_dma_complete;
767 	desc->callback_param = port;
768 
769 	/* Push the current DMA TX transaction into the pending queue */
770 	/* DMA not yet started, safe to free resources on failure */
771 	ret = dma_submit_error(dmaengine_submit(desc));
772 	if (ret) {
773 		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
774 		stm32_usart_tx_dma_terminate(stm32port);
775 		goto fallback_err;
776 	}
777 
778 	/* Issue pending DMA TX requests */
779 	dma_async_issue_pending(stm32port->tx_ch);
780 
781 	uart_xmit_advance(port, count);
782 
783 	return;
784 
785 fallback_err:
786 	stm32_usart_transmit_chars_pio(port);
787 }
788 
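/*
 * Transmit path entry point, called with port->lock held: handle the RS485
 * driver-enable signalling and the high-priority x_char, then hand the data
 * to either the DMA or the PIO transmit helper.
 */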
789 static void stm32_usart_transmit_chars(struct uart_port *port)
790 {
791 	struct stm32_port *stm32_port = to_stm32_port(port);
792 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
793 	struct circ_buf *xmit = &port->state->xmit;
794 	u32 isr;
795 	int ret;
796 
797 	if (!stm32_port->hw_flow_control &&
798 	    port->rs485.flags & SER_RS485_ENABLED &&
799 	    (port->x_char ||
800 	     !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
801 		stm32_usart_tc_interrupt_disable(port);
802 		stm32_usart_rs485_rts_enable(port);
803 	}
804 
805 	if (port->x_char) {
806 		/* dma terminate may have been called in case of dma pause failure */
807 		stm32_usart_tx_dma_pause(stm32_port);
808 
809 		/* Check that TDR is empty before filling FIFO */
810 		ret =
811 		readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
812 						  isr,
813 						  (isr & USART_SR_TXE),
814 						  10, 1000);
815 		if (ret)
816 			dev_warn(port->dev, "1 character may be erased\n");
817 
818 		writel_relaxed(port->x_char, port->membase + ofs->tdr);
819 		port->x_char = 0;
820 		port->icount.tx++;
821 
822 		/* dma terminate may have been called in case of dma resume failure */
823 		stm32_usart_tx_dma_resume(stm32_port);
824 		return;
825 	}
826 
827 	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
828 		stm32_usart_tx_interrupt_disable(port);
829 		return;
830 	}
831 
832 	if (ofs->icr == UNDEF_REG)
833 		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
834 	else
835 		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
836 
837 	if (stm32_port->tx_ch)
838 		stm32_usart_transmit_chars_dma(port);
839 	else
840 		stm32_usart_transmit_chars_pio(port);
841 
842 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
843 		uart_write_wakeup(port);
844 
845 	if (uart_circ_empty(xmit)) {
846 		stm32_usart_tx_interrupt_disable(port);
847 		if (!stm32_port->hw_flow_control &&
848 		    port->rs485.flags & SER_RS485_ENABLED) {
849 			stm32_usart_tc_interrupt_enable(port);
850 		}
851 	}
852 }
853 
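/*
 * Main interrupt handler: RS485 turnaround on transmission complete, receiver
 * timeout, wakeup from low power, RX in PIO mode (or DMA error recovery and
 * DMA flush on receiver timeout), and TX in PIO mode.
 */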
854 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
855 {
856 	struct uart_port *port = ptr;
857 	struct tty_port *tport = &port->state->port;
858 	struct stm32_port *stm32_port = to_stm32_port(port);
859 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
860 	u32 sr;
861 	unsigned int size;
862 
863 	sr = readl_relaxed(port->membase + ofs->isr);
864 
865 	if (!stm32_port->hw_flow_control &&
866 	    port->rs485.flags & SER_RS485_ENABLED &&
867 	    (sr & USART_SR_TC)) {
868 		stm32_usart_tc_interrupt_disable(port);
869 		stm32_usart_rs485_rts_disable(port);
870 	}
871 
872 	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
873 		writel_relaxed(USART_ICR_RTOCF,
874 			       port->membase + ofs->icr);
875 
876 	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
877 		/* Clear wake up flag and disable wake up interrupt */
878 		writel_relaxed(USART_ICR_WUCF,
879 			       port->membase + ofs->icr);
880 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
881 		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
882 			pm_wakeup_event(tport->tty->dev, 0);
883 	}
884 
885 	/*
886 	 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA request
887 	 * line has been masked by hardware and RX data is stacking up in the FIFO.
888 	 */
889 	if (!stm32_port->throttled) {
890 		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
891 		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
892 			spin_lock(&port->lock);
893 			size = stm32_usart_receive_chars(port, false);
894 			uart_unlock_and_check_sysrq(port);
895 			if (size)
896 				tty_flip_buffer_push(tport);
897 		}
898 	}
899 
900 	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
901 		spin_lock(&port->lock);
902 		stm32_usart_transmit_chars(port);
903 		spin_unlock(&port->lock);
904 	}
905 
906 	/* Receiver timeout irq for DMA RX */
907 	if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
908 		spin_lock(&port->lock);
909 		size = stm32_usart_receive_chars(port, false);
910 		uart_unlock_and_check_sysrq(port);
911 		if (size)
912 			tty_flip_buffer_push(tport);
913 	}
914 
915 	return IRQ_HANDLED;
916 }
917 
918 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
919 {
920 	struct stm32_port *stm32_port = to_stm32_port(port);
921 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
922 
923 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
924 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
925 	else
926 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
927 
928 	mctrl_gpio_set(stm32_port->gpios, mctrl);
929 }
930 
931 static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
932 {
933 	struct stm32_port *stm32_port = to_stm32_port(port);
934 	unsigned int ret;
935 
936 	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
937 	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
938 
939 	return mctrl_gpio_get(stm32_port->gpios, &ret);
940 }
941 
942 static void stm32_usart_enable_ms(struct uart_port *port)
943 {
944 	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
945 }
946 
947 static void stm32_usart_disable_ms(struct uart_port *port)
948 {
949 	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
950 }
951 
952 /* Transmit stop */
953 static void stm32_usart_stop_tx(struct uart_port *port)
954 {
955 	struct stm32_port *stm32_port = to_stm32_port(port);
956 
957 	stm32_usart_tx_interrupt_disable(port);
958 
959 	/* dma terminate may have been called in case of dma pause failure */
960 	stm32_usart_tx_dma_pause(stm32_port);
961 
962 	stm32_usart_rs485_rts_disable(port);
963 }
964 
965 /* There are probably characters waiting to be transmitted. */
966 static void stm32_usart_start_tx(struct uart_port *port)
967 {
968 	struct circ_buf *xmit = &port->state->xmit;
969 
970 	if (uart_circ_empty(xmit) && !port->x_char) {
971 		stm32_usart_rs485_rts_disable(port);
972 		return;
973 	}
974 
975 	stm32_usart_rs485_rts_enable(port);
976 
977 	stm32_usart_transmit_chars(port);
978 }
979 
980 /* Flush the transmit buffer. */
981 static void stm32_usart_flush_buffer(struct uart_port *port)
982 {
983 	struct stm32_port *stm32_port = to_stm32_port(port);
984 
985 	if (stm32_port->tx_ch)
986 		stm32_usart_tx_dma_terminate(stm32_port);
987 }
988 
989 /* Throttle the remote when input buffer is about to overflow. */
990 static void stm32_usart_throttle(struct uart_port *port)
991 {
992 	struct stm32_port *stm32_port = to_stm32_port(port);
993 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
994 	unsigned long flags;
995 
996 	spin_lock_irqsave(&port->lock, flags);
997 
998 	/*
999 	 * Pause DMA transfer, so the RX data gets queued into the FIFO.
1000 	 * Hardware flow control is triggered when RX FIFO is full.
1001 	 */
1002 	stm32_usart_rx_dma_pause(stm32_port);
1003 
1004 	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1005 	if (stm32_port->cr3_irq)
1006 		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1007 
1008 	stm32_port->throttled = true;
1009 	spin_unlock_irqrestore(&port->lock, flags);
1010 }
1011 
1012 /* Unthrottle the remote, the input buffer can now accept data. */
1013 static void stm32_usart_unthrottle(struct uart_port *port)
1014 {
1015 	struct stm32_port *stm32_port = to_stm32_port(port);
1016 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1017 	unsigned long flags;
1018 
1019 	spin_lock_irqsave(&port->lock, flags);
1020 	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1021 	if (stm32_port->cr3_irq)
1022 		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
1023 
1024 	stm32_port->throttled = false;
1025 
1026 	/*
1027 	 * Switch back to DMA mode (resume DMA).
1028 	 * Hardware flow control is stopped when FIFO is not full any more.
1029 	 */
1030 	if (stm32_port->rx_ch)
1031 		stm32_usart_rx_dma_start_or_resume(port);
1032 
1033 	spin_unlock_irqrestore(&port->lock, flags);
1034 }
1035 
1036 /* Receive stop */
1037 static void stm32_usart_stop_rx(struct uart_port *port)
1038 {
1039 	struct stm32_port *stm32_port = to_stm32_port(port);
1040 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1041 
1042 	/* Disable DMA request line. */
1043 	stm32_usart_rx_dma_pause(stm32_port);
1044 
1045 	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1046 	if (stm32_port->cr3_irq)
1047 		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1048 }
1049 
1050 /* Handle breaks - ignored by us */
1051 static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
1052 {
1053 }
1054 
1055 static int stm32_usart_startup(struct uart_port *port)
1056 {
1057 	struct stm32_port *stm32_port = to_stm32_port(port);
1058 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1059 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1060 	const char *name = to_platform_device(port->dev)->name;
1061 	u32 val;
1062 	int ret;
1063 
1064 	ret = request_irq(port->irq, stm32_usart_interrupt,
1065 			  IRQF_NO_SUSPEND, name, port);
1066 	if (ret)
1067 		return ret;
1068 
1069 	if (stm32_port->swap) {
1070 		val = readl_relaxed(port->membase + ofs->cr2);
1071 		val |= USART_CR2_SWAP;
1072 		writel_relaxed(val, port->membase + ofs->cr2);
1073 	}
1074 
1075 	/* RX FIFO Flush */
1076 	if (ofs->rqr != UNDEF_REG)
1077 		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
1078 
1079 	if (stm32_port->rx_ch) {
1080 		ret = stm32_usart_rx_dma_start_or_resume(port);
1081 		if (ret) {
1082 			free_irq(port->irq, port);
1083 			return ret;
1084 		}
1085 	}
1086 
1087 	/* RX enabling */
1088 	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
1089 	stm32_usart_set_bits(port, ofs->cr1, val);
1090 
1091 	return 0;
1092 }
1093 
1094 static void stm32_usart_shutdown(struct uart_port *port)
1095 {
1096 	struct stm32_port *stm32_port = to_stm32_port(port);
1097 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1098 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1099 	u32 val, isr;
1100 	int ret;
1101 
1102 	if (stm32_usart_tx_dma_started(stm32_port))
1103 		stm32_usart_tx_dma_terminate(stm32_port);
1104 
1105 	if (stm32_port->tx_ch)
1106 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1107 
1108 	/* Disable modem control interrupts */
1109 	stm32_usart_disable_ms(port);
1110 
1111 	val = USART_CR1_TXEIE | USART_CR1_TE;
1112 	val |= stm32_port->cr1_irq | USART_CR1_RE;
1113 	val |= BIT(cfg->uart_enable_bit);
1114 	if (stm32_port->fifoen)
1115 		val |= USART_CR1_FIFOEN;
1116 
1117 	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
1118 					 isr, (isr & USART_SR_TC),
1119 					 10, 100000);
1120 
1121 	/* Send the TC error message only when ISR_TC is not set */
1122 	if (ret)
1123 		dev_err(port->dev, "Transmission is not complete\n");
1124 
1125 	/* Disable RX DMA. */
1126 	if (stm32_port->rx_ch) {
1127 		stm32_usart_rx_dma_terminate(stm32_port);
1128 		dmaengine_synchronize(stm32_port->rx_ch);
1129 	}
1130 
1131 	/* flush RX & TX FIFO */
1132 	if (ofs->rqr != UNDEF_REG)
1133 		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1134 			       port->membase + ofs->rqr);
1135 
1136 	stm32_usart_clr_bits(port, ofs->cr1, val);
1137 
1138 	free_irq(port->irq, port);
1139 }
1140 
1141 static void stm32_usart_set_termios(struct uart_port *port,
1142 				    struct ktermios *termios,
1143 				    const struct ktermios *old)
1144 {
1145 	struct stm32_port *stm32_port = to_stm32_port(port);
1146 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1147 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1148 	struct serial_rs485 *rs485conf = &port->rs485;
1149 	unsigned int baud, bits;
1150 	u32 usartdiv, mantissa, fraction, oversampling;
1151 	tcflag_t cflag = termios->c_cflag;
1152 	u32 cr1, cr2, cr3, isr;
1153 	unsigned long flags;
1154 	int ret;
1155 
1156 	if (!stm32_port->hw_flow_control)
1157 		cflag &= ~CRTSCTS;
1158 
1159 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
1160 
1161 	spin_lock_irqsave(&port->lock, flags);
1162 
1163 	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1164 						isr,
1165 						(isr & USART_SR_TC),
1166 						10, 100000);
1167 
1168 	/* Send the TC error message only when ISR_TC is not set. */
1169 	if (ret)
1170 		dev_err(port->dev, "Transmission is not complete\n");
1171 
1172 	/* Stop serial port and reset value */
1173 	writel_relaxed(0, port->membase + ofs->cr1);
1174 
1175 	/* flush RX & TX FIFO */
1176 	if (ofs->rqr != UNDEF_REG)
1177 		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1178 			       port->membase + ofs->rqr);
1179 
1180 	cr1 = USART_CR1_TE | USART_CR1_RE;
1181 	if (stm32_port->fifoen)
1182 		cr1 |= USART_CR1_FIFOEN;
1183 	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;
1184 
1185 	/* Tx and RX FIFO configuration */
1186 	cr3 = readl_relaxed(port->membase + ofs->cr3);
1187 	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
1188 	if (stm32_port->fifoen) {
1189 		if (stm32_port->txftcfg >= 0)
1190 			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
1191 		if (stm32_port->rxftcfg >= 0)
1192 			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
1193 	}
1194 
1195 	if (cflag & CSTOPB)
1196 		cr2 |= USART_CR2_STOP_2B;
1197 
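	/*
	 * rdr_mask keeps only the data bits, so a possible parity bit is
	 * stripped from characters read out of the RDR.
	 */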
1198 	bits = tty_get_char_size(cflag);
1199 	stm32_port->rdr_mask = (BIT(bits) - 1);
1200 
1201 	if (cflag & PARENB) {
1202 		bits++;
1203 		cr1 |= USART_CR1_PCE;
1204 	}
1205 
1206 	/*
1207 	 * Word length configuration:
1208 	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
1209 	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
1210 	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
1211 	 * M0 and M1 already cleared by cr1 initialization.
1212 	 */
1213 	if (bits == 9) {
1214 		cr1 |= USART_CR1_M0;
1215 	} else if ((bits == 7) && cfg->has_7bits_data) {
1216 		cr1 |= USART_CR1_M1;
1217 	} else if (bits != 8) {
1218 		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
1219 			, bits);
1220 		cflag &= ~CSIZE;
1221 		cflag |= CS8;
1222 		termios->c_cflag = cflag;
1223 		bits = 8;
1224 		if (cflag & PARENB) {
1225 			bits++;
1226 			cr1 |= USART_CR1_M0;
1227 		}
1228 	}
1229 
1230 	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
1231 				       (stm32_port->fifoen &&
1232 					stm32_port->rxftcfg >= 0))) {
1233 		if (cflag & CSTOPB)
1234 			bits = bits + 3; /* 1 start bit + 2 stop bits */
1235 		else
1236 			bits = bits + 2; /* 1 start bit + 1 stop bit */
1237 
1238 		/* RX timeout irq to occur after last stop bit + bits */
1239 		stm32_port->cr1_irq = USART_CR1_RTOIE;
1240 		writel_relaxed(bits, port->membase + ofs->rtor);
1241 		cr2 |= USART_CR2_RTOEN;
1242 		/*
1243 		 * Enable the FIFO threshold irq in two cases: when there is no DMA, or when
1244 		 * waking up over USART from low power, until the DMA gets re-enabled by resume.
1245 		 */
1246 		stm32_port->cr3_irq =  USART_CR3_RXFTIE;
1247 	}
1248 
1249 	cr1 |= stm32_port->cr1_irq;
1250 	cr3 |= stm32_port->cr3_irq;
1251 
1252 	if (cflag & PARODD)
1253 		cr1 |= USART_CR1_PS;
1254 
1255 	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1256 	if (cflag & CRTSCTS) {
1257 		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
1258 		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
1259 	}
1260 
1261 	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
1262 
1263 	/*
1264 	 * The USART supports 16 or 8 times oversampling.
1265 	 * By default we prefer 16 times oversampling, so that the receiver
1266 	 * has a better tolerance to clock deviations.
1267 	 * 8 times oversampling is only used to achieve higher speeds.
1268 	 */
1269 	if (usartdiv < 16) {
1270 		oversampling = 8;
1271 		cr1 |= USART_CR1_OVER8;
1272 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
1273 	} else {
1274 		oversampling = 16;
1275 		cr1 &= ~USART_CR1_OVER8;
1276 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
1277 	}
1278 
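	/*
	 * Program BRR with the mantissa (usartdiv / oversampling) in the
	 * upper bits and the fraction (usartdiv % oversampling) in the lower
	 * bits. For example, with uartclk = 64 MHz and baud = 115200:
	 * usartdiv = 556, mantissa = 34, fraction = 12.
	 */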
1279 	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
1280 	fraction = usartdiv % oversampling;
1281 	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
1282 
1283 	uart_update_timeout(port, cflag, baud);
1284 
1285 	port->read_status_mask = USART_SR_ORE;
1286 	if (termios->c_iflag & INPCK)
1287 		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
1288 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1289 		port->read_status_mask |= USART_SR_FE;
1290 
1291 	/* Characters to ignore */
1292 	port->ignore_status_mask = 0;
1293 	if (termios->c_iflag & IGNPAR)
1294 		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
1295 	if (termios->c_iflag & IGNBRK) {
1296 		port->ignore_status_mask |= USART_SR_FE;
1297 		/*
1298 		 * If we're ignoring parity and break indicators,
1299 		 * ignore overruns too (for real raw support).
1300 		 */
1301 		if (termios->c_iflag & IGNPAR)
1302 			port->ignore_status_mask |= USART_SR_ORE;
1303 	}
1304 
1305 	/* Ignore all characters if CREAD is not set */
1306 	if ((termios->c_cflag & CREAD) == 0)
1307 		port->ignore_status_mask |= USART_SR_DUMMY_RX;
1308 
1309 	if (stm32_port->rx_ch) {
1310 		/*
1311 		 * Setup DMA to collect only valid data and enable error irqs.
1312 		 * This also enables break reception when using DMA.
1313 		 */
1314 		cr1 |= USART_CR1_PEIE;
1315 		cr3 |= USART_CR3_EIE;
1316 		cr3 |= USART_CR3_DMAR;
1317 		cr3 |= USART_CR3_DDRE;
1318 	}
1319 
1320 	if (stm32_port->tx_ch)
1321 		cr3 |= USART_CR3_DMAT;
1322 
1323 	if (rs485conf->flags & SER_RS485_ENABLED) {
1324 		stm32_usart_config_reg_rs485(&cr1, &cr3,
1325 					     rs485conf->delay_rts_before_send,
1326 					     rs485conf->delay_rts_after_send,
1327 					     baud);
1328 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
1329 			cr3 &= ~USART_CR3_DEP;
1330 			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
1331 		} else {
1332 			cr3 |= USART_CR3_DEP;
1333 			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1334 		}
1335 
1336 	} else {
1337 		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
1338 		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
1339 	}
1340 
1341 	/* Configure wake up from low power on start bit detection */
1342 	if (stm32_port->wakeup_src) {
1343 		cr3 &= ~USART_CR3_WUS_MASK;
1344 		cr3 |= USART_CR3_WUS_START_BIT;
1345 	}
1346 
1347 	writel_relaxed(cr3, port->membase + ofs->cr3);
1348 	writel_relaxed(cr2, port->membase + ofs->cr2);
1349 	writel_relaxed(cr1, port->membase + ofs->cr1);
1350 
1351 	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1352 	spin_unlock_irqrestore(&port->lock, flags);
1353 
1354 	/* Handle modem control interrupts */
1355 	if (UART_ENABLE_MS(port, termios->c_cflag))
1356 		stm32_usart_enable_ms(port);
1357 	else
1358 		stm32_usart_disable_ms(port);
1359 }
1360 
1361 static const char *stm32_usart_type(struct uart_port *port)
1362 {
1363 	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1364 }
1365 
1366 static void stm32_usart_release_port(struct uart_port *port)
1367 {
1368 }
1369 
1370 static int stm32_usart_request_port(struct uart_port *port)
1371 {
1372 	return 0;
1373 }
1374 
1375 static void stm32_usart_config_port(struct uart_port *port, int flags)
1376 {
1377 	if (flags & UART_CONFIG_TYPE)
1378 		port->type = PORT_STM32;
1379 }
1380 
1381 static int
1382 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
1383 {
1384 	/* No user changeable parameters */
1385 	return -EINVAL;
1386 }
1387 
1388 static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1389 			   unsigned int oldstate)
1390 {
1391 	struct stm32_port *stm32port = container_of(port,
1392 			struct stm32_port, port);
1393 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1394 	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1395 	unsigned long flags;
1396 
1397 	switch (state) {
1398 	case UART_PM_STATE_ON:
1399 		pm_runtime_get_sync(port->dev);
1400 		break;
1401 	case UART_PM_STATE_OFF:
1402 		spin_lock_irqsave(&port->lock, flags);
1403 		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1404 		spin_unlock_irqrestore(&port->lock, flags);
1405 		pm_runtime_put_sync(port->dev);
1406 		break;
1407 	}
1408 }
1409 
1410 #if defined(CONFIG_CONSOLE_POLL)
1411 
1412 /* Callbacks for character polling in a debug context (e.g. KGDB). */
1413 static int stm32_usart_poll_init(struct uart_port *port)
1414 {
1415 	struct stm32_port *stm32_port = to_stm32_port(port);
1416 
1417 	return clk_prepare_enable(stm32_port->clk);
1418 }
1419 
1420 static int stm32_usart_poll_get_char(struct uart_port *port)
1421 {
1422 	struct stm32_port *stm32_port = to_stm32_port(port);
1423 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1424 
1425 	if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
1426 		return NO_POLL_CHAR;
1427 
1428 	return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
1429 }
1430 
1431 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
1432 {
1433 	stm32_usart_console_putchar(port, ch);
1434 }
1435 #endif /* CONFIG_CONSOLE_POLL */
1436 
1437 static const struct uart_ops stm32_uart_ops = {
1438 	.tx_empty	= stm32_usart_tx_empty,
1439 	.set_mctrl	= stm32_usart_set_mctrl,
1440 	.get_mctrl	= stm32_usart_get_mctrl,
1441 	.stop_tx	= stm32_usart_stop_tx,
1442 	.start_tx	= stm32_usart_start_tx,
1443 	.throttle	= stm32_usart_throttle,
1444 	.unthrottle	= stm32_usart_unthrottle,
1445 	.stop_rx	= stm32_usart_stop_rx,
1446 	.enable_ms	= stm32_usart_enable_ms,
1447 	.break_ctl	= stm32_usart_break_ctl,
1448 	.startup	= stm32_usart_startup,
1449 	.shutdown	= stm32_usart_shutdown,
1450 	.flush_buffer	= stm32_usart_flush_buffer,
1451 	.set_termios	= stm32_usart_set_termios,
1452 	.pm		= stm32_usart_pm,
1453 	.type		= stm32_usart_type,
1454 	.release_port	= stm32_usart_release_port,
1455 	.request_port	= stm32_usart_request_port,
1456 	.config_port	= stm32_usart_config_port,
1457 	.verify_port	= stm32_usart_verify_port,
1458 #if defined(CONFIG_CONSOLE_POLL)
1459 	.poll_init      = stm32_usart_poll_init,
1460 	.poll_get_char	= stm32_usart_poll_get_char,
1461 	.poll_put_char	= stm32_usart_poll_put_char,
1462 #endif /* CONFIG_CONSOLE_POLL */
1463 };
1464 
1465 /*
1466  * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
1467  * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
1468  * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
1469  * So a threshold of stm32h7_usart_fifo_thresh_cfg[i] bytes is programmed as RXFTCFG / TXFTCFG = i - 1.
1470  */
1471 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
1472 
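/*
 * Illustrative device tree fragment (the node label is hypothetical, only the
 * rx-threshold / tx-threshold properties are read below):
 *
 *	&usart2 {
 *		rx-threshold = <8>;
 *		tx-threshold = <8>;
 *	};
 *
 * With the table above, 8 bytes is array index 3, programmed as FTCFG = 2.
 */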
1473 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
1474 				  int *ftcfg)
1475 {
1476 	u32 bytes, i;
1477 
1478 	/* DT option to get the RX & TX FIFO threshold (defaults to 8 bytes) */
1479 	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1480 		bytes = 8;
1481 
1482 	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
1483 		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
1484 			break;
1485 	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1486 		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1487 
1488 	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
1489 		stm32h7_usart_fifo_thresh_cfg[i]);
1490 
1491 	/* Provide the FIFO threshold ftcfg (a 1-byte threshold has no encoding: threshold irq unused) */
1492 	if (i)
1493 		*ftcfg = i - 1;
1494 	else
1495 		*ftcfg = -EINVAL;
1496 }
1497 
1498 static void stm32_usart_deinit_port(struct stm32_port *stm32port)
1499 {
1500 	clk_disable_unprepare(stm32port->clk);
1501 }
1502 
1503 static const struct serial_rs485 stm32_rs485_supported = {
1504 	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
1505 		 SER_RS485_RX_DURING_TX,
1506 	.delay_rts_before_send = 1,
1507 	.delay_rts_after_send = 1,
1508 };
1509 
1510 static int stm32_usart_init_port(struct stm32_port *stm32port,
1511 				 struct platform_device *pdev)
1512 {
1513 	struct uart_port *port = &stm32port->port;
1514 	struct resource *res;
1515 	int ret, irq;
1516 
1517 	irq = platform_get_irq(pdev, 0);
1518 	if (irq < 0)
1519 		return irq;
1520 
1521 	port->iotype	= UPIO_MEM;
1522 	port->flags	= UPF_BOOT_AUTOCONF;
1523 	port->ops	= &stm32_uart_ops;
1524 	port->dev	= &pdev->dev;
1525 	port->fifosize	= stm32port->info->cfg.fifosize;
1526 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
1527 	port->irq = irq;
1528 	port->rs485_config = stm32_usart_config_rs485;
1529 	port->rs485_supported = stm32_rs485_supported;
1530 
1531 	ret = stm32_usart_init_rs485(port, pdev);
1532 	if (ret)
1533 		return ret;
1534 
1535 	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
1536 		of_property_read_bool(pdev->dev.of_node, "wakeup-source");
1537 
1538 	stm32port->swap = stm32port->info->cfg.has_swap &&
1539 		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
1540 
1541 	stm32port->fifoen = stm32port->info->cfg.has_fifo;
1542 	if (stm32port->fifoen) {
1543 		stm32_usart_get_ftcfg(pdev, "rx-threshold",
1544 				      &stm32port->rxftcfg);
1545 		stm32_usart_get_ftcfg(pdev, "tx-threshold",
1546 				      &stm32port->txftcfg);
1547 	}
1548 
1549 	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1550 	if (IS_ERR(port->membase))
1551 		return PTR_ERR(port->membase);
1552 	port->mapbase = res->start;
1553 
1554 	spin_lock_init(&port->lock);
1555 
1556 	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
1557 	if (IS_ERR(stm32port->clk))
1558 		return PTR_ERR(stm32port->clk);
1559 
1560 	/* Ensure that clk rate is correct by enabling the clk */
1561 	ret = clk_prepare_enable(stm32port->clk);
1562 	if (ret)
1563 		return ret;
1564 
1565 	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
1566 	if (!stm32port->port.uartclk) {
1567 		ret = -EINVAL;
1568 		goto err_clk;
1569 	}
1570 
1571 	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
1572 	if (IS_ERR(stm32port->gpios)) {
1573 		ret = PTR_ERR(stm32port->gpios);
1574 		goto err_clk;
1575 	}
1576 
1577 	/*
1578 	 * The CTS/RTS gpios and the "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
1579 	 * properties must not both be specified.
1580 	 */
1581 	if (stm32port->hw_flow_control) {
1582 		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
1583 		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
1584 			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
1585 			ret = -EINVAL;
1586 			goto err_clk;
1587 		}
1588 	}
1589 
1590 	return ret;
1591 
1592 err_clk:
1593 	clk_disable_unprepare(stm32port->clk);
1594 
1595 	return ret;
1596 }
1597 
1598 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1599 {
1600 	struct device_node *np = pdev->dev.of_node;
1601 	int id;
1602 
1603 	if (!np)
1604 		return NULL;
1605 
1606 	id = of_alias_get_id(np, "serial");
1607 	if (id < 0) {
1608 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1609 		return NULL;
1610 	}
1611 
1612 	if (WARN_ON(id >= STM32_MAX_PORTS))
1613 		return NULL;
1614 
1615 	stm32_ports[id].hw_flow_control =
1616 		of_property_read_bool(np, "st,hw-flow-ctrl") /* deprecated */ ||
1617 		of_property_read_bool(np, "uart-has-rtscts");
1618 	stm32_ports[id].port.line = id;
1619 	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1620 	stm32_ports[id].cr3_irq = 0;
1621 	stm32_ports[id].last_res = RX_BUF_L;
1622 	return &stm32_ports[id];
1623 }
1624 
1625 #ifdef CONFIG_OF
1626 static const struct of_device_id stm32_match[] = {
1627 	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
1628 	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
1629 	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
1630 	{},
1631 };
1632 
1633 MODULE_DEVICE_TABLE(of, stm32_match);
1634 #endif
1635 
1636 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
1637 					 struct platform_device *pdev)
1638 {
1639 	if (stm32port->rx_buf)
1640 		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
1641 				  stm32port->rx_dma_buf);
1642 }
1643 
1644 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
1645 				       struct platform_device *pdev)
1646 {
1647 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1648 	struct uart_port *port = &stm32port->port;
1649 	struct device *dev = &pdev->dev;
1650 	struct dma_slave_config config;
1651 	int ret;
1652 
1653 	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
1654 					       &stm32port->rx_dma_buf,
1655 					       GFP_KERNEL);
1656 	if (!stm32port->rx_buf)
1657 		return -ENOMEM;
1658 
1659 	/* Configure DMA channel */
1660 	memset(&config, 0, sizeof(config));
1661 	config.src_addr = port->mapbase + ofs->rdr;
1662 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1663 
1664 	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
1665 	if (ret < 0) {
1666 		dev_err(dev, "rx dma channel config failed\n");
1667 		stm32_usart_of_dma_rx_remove(stm32port, pdev);
1668 		return ret;
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
1675 					 struct platform_device *pdev)
1676 {
1677 	if (stm32port->tx_buf)
1678 		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
1679 				  stm32port->tx_dma_buf);
1680 }
1681 
1682 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
1683 				       struct platform_device *pdev)
1684 {
1685 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1686 	struct uart_port *port = &stm32port->port;
1687 	struct device *dev = &pdev->dev;
1688 	struct dma_slave_config config;
1689 	int ret;
1690 
1691 	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
1692 					       &stm32port->tx_dma_buf,
1693 					       GFP_KERNEL);
1694 	if (!stm32port->tx_buf)
1695 		return -ENOMEM;
1696 
1697 	/* Configure DMA channel */
1698 	memset(&config, 0, sizeof(config));
1699 	config.dst_addr = port->mapbase + ofs->tdr;
1700 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1701 
1702 	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
1703 	if (ret < 0) {
1704 		dev_err(dev, "tx dma channel config failed\n");
1705 		stm32_usart_of_dma_tx_remove(stm32port, pdev);
1706 		return ret;
1707 	}
1708 
1709 	return 0;
1710 }
1711 
1712 static int stm32_usart_serial_probe(struct platform_device *pdev)
1713 {
1714 	struct stm32_port *stm32port;
1715 	int ret;
1716 
1717 	stm32port = stm32_usart_of_get_port(pdev);
1718 	if (!stm32port)
1719 		return -ENODEV;
1720 
1721 	stm32port->info = of_device_get_match_data(&pdev->dev);
1722 	if (!stm32port->info)
1723 		return -EINVAL;
1724 
1725 	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
1726 	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
1727 		return -EPROBE_DEFER;
1728 
1729 	/* Fall back to interrupt mode for any non-deferral error */
1730 	if (IS_ERR(stm32port->rx_ch))
1731 		stm32port->rx_ch = NULL;
1732 
1733 	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
1734 	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
1735 		ret = -EPROBE_DEFER;
1736 		goto err_dma_rx;
1737 	}
1738 	/* Fall back to interrupt mode for any non-deferral error */
1739 	if (IS_ERR(stm32port->tx_ch))
1740 		stm32port->tx_ch = NULL;
1741 
1742 	ret = stm32_usart_init_port(stm32port, pdev);
1743 	if (ret)
1744 		goto err_dma_tx;
1745 
1746 	if (stm32port->wakeup_src) {
1747 		device_set_wakeup_capable(&pdev->dev, true);
1748 		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
1749 		if (ret)
1750 			goto err_deinit_port;
1751 	}
1752 
1753 	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
1754 		/* Fall back to interrupt mode */
1755 		dma_release_channel(stm32port->rx_ch);
1756 		stm32port->rx_ch = NULL;
1757 	}
1758 
1759 	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
1760 		/* Fall back to interrupt mode */
1761 		dma_release_channel(stm32port->tx_ch);
1762 		stm32port->tx_ch = NULL;
1763 	}
1764 
1765 	if (!stm32port->rx_ch)
1766 		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
1767 	if (!stm32port->tx_ch)
1768 		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
1769 
1770 	platform_set_drvdata(pdev, &stm32port->port);
1771 
1772 	pm_runtime_get_noresume(&pdev->dev);
1773 	pm_runtime_set_active(&pdev->dev);
1774 	pm_runtime_enable(&pdev->dev);
1775 
1776 	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
1777 	if (ret)
1778 		goto err_port;
1779 
1780 	pm_runtime_put_sync(&pdev->dev);
1781 
1782 	return 0;
1783 
1784 err_port:
1785 	pm_runtime_disable(&pdev->dev);
1786 	pm_runtime_set_suspended(&pdev->dev);
1787 	pm_runtime_put_noidle(&pdev->dev);
1788 
1789 	if (stm32port->tx_ch)
1790 		stm32_usart_of_dma_tx_remove(stm32port, pdev);
1791 	if (stm32port->rx_ch)
1792 		stm32_usart_of_dma_rx_remove(stm32port, pdev);
1793 
1794 	if (stm32port->wakeup_src)
1795 		dev_pm_clear_wake_irq(&pdev->dev);
1796 
1797 err_deinit_port:
1798 	if (stm32port->wakeup_src)
1799 		device_set_wakeup_capable(&pdev->dev, false);
1800 
1801 	stm32_usart_deinit_port(stm32port);
1802 
1803 err_dma_tx:
1804 	if (stm32port->tx_ch)
1805 		dma_release_channel(stm32port->tx_ch);
1806 
1807 err_dma_rx:
1808 	if (stm32port->rx_ch)
1809 		dma_release_channel(stm32port->rx_ch);
1810 
1811 	return ret;
1812 }
1813 
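/*
 * Remove: unregister the uart port, drop runtime PM, tear down the DMA
 * channels and their buffers, mask the parity-error and DMA control bits
 * in CR1/CR3, release the wake-up irq if one was armed and deinitialize
 * the port.
 */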
1814 static int stm32_usart_serial_remove(struct platform_device *pdev)
1815 {
1816 	struct uart_port *port = platform_get_drvdata(pdev);
1817 	struct stm32_port *stm32_port = to_stm32_port(port);
1818 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1819 	u32 cr3;
1820 
1821 	pm_runtime_get_sync(&pdev->dev);
1822 	uart_remove_one_port(&stm32_usart_driver, port);
1823 
1824 	pm_runtime_disable(&pdev->dev);
1825 	pm_runtime_set_suspended(&pdev->dev);
1826 	pm_runtime_put_noidle(&pdev->dev);
1827 
1828 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
1829 
1830 	if (stm32_port->tx_ch) {
1831 		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
1832 		dma_release_channel(stm32_port->tx_ch);
1833 	}
1834 
1835 	if (stm32_port->rx_ch) {
1836 		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
1837 		dma_release_channel(stm32_port->rx_ch);
1838 	}
1839 
1840 	cr3 = readl_relaxed(port->membase + ofs->cr3);
1841 	cr3 &= ~USART_CR3_EIE;
1842 	cr3 &= ~USART_CR3_DMAR;
1843 	cr3 &= ~USART_CR3_DMAT;
1844 	cr3 &= ~USART_CR3_DDRE;
1845 	writel_relaxed(cr3, port->membase + ofs->cr3);
1846 
1847 	if (stm32_port->wakeup_src) {
1848 		dev_pm_clear_wake_irq(&pdev->dev);
1849 		device_init_wakeup(&pdev->dev, false);
1850 	}
1851 
1852 	stm32_usart_deinit_port(stm32_port);
1853 
1854 	return 0;
1855 }
1856 
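/*
 * Polled console output: wait (with a timeout) for the transmit data
 * register to become empty, then write the character to TDR.
 */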
1857 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1858 {
1859 	struct stm32_port *stm32_port = to_stm32_port(port);
1860 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1861 	u32 isr;
1862 	int ret;
1863 
1864 	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
1865 						(isr & USART_SR_TXE), 100,
1866 						STM32_USART_TIMEOUT_USEC);
1867 	if (ret != 0) {
1868 		dev_err(port->dev, "Error while sending data in UART TX: %d\n", ret);
1869 		return;
1870 	}
1871 	writel_relaxed(ch, port->membase + ofs->tdr);
1872 }
1873 
1874 #ifdef CONFIG_SERIAL_STM32_CONSOLE
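/*
 * Console write: take the port lock (trylock only if an oops is in
 * progress), mask the usart interrupt enables while keeping the
 * transmitter and usart enabled, emit the string with polled I/O and
 * finally restore CR1.
 */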
1875 static void stm32_usart_console_write(struct console *co, const char *s,
1876 				      unsigned int cnt)
1877 {
1878 	struct uart_port *port = &stm32_ports[co->index].port;
1879 	struct stm32_port *stm32_port = to_stm32_port(port);
1880 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1881 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1882 	unsigned long flags;
1883 	u32 old_cr1, new_cr1;
1884 	int locked = 1;
1885 
1886 	if (oops_in_progress)
1887 		locked = spin_trylock_irqsave(&port->lock, flags);
1888 	else
1889 		spin_lock_irqsave(&port->lock, flags);
1890 
1891 	/* Save CR1, disable interrupts, enable the transmitter and usart */
1892 	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
1893 	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
1894 	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
1895 	writel_relaxed(new_cr1, port->membase + ofs->cr1);
1896 
1897 	uart_console_write(port, s, cnt, stm32_usart_console_putchar);
1898 
1899 	/* Restore interrupt state */
1900 	writel_relaxed(old_cr1, port->membase + ofs->cr1);
1901 
1902 	if (locked)
1903 		spin_unlock_irqrestore(&port->lock, flags);
1904 }
1905 
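/*
 * Console setup: validate the selected port, parse any
 * "baud,parity,bits,flow" options from the command line and apply them,
 * defaulting to 9600n8 with no flow control.
 */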
1906 static int stm32_usart_console_setup(struct console *co, char *options)
1907 {
1908 	struct stm32_port *stm32port;
1909 	int baud = 9600;
1910 	int bits = 8;
1911 	int parity = 'n';
1912 	int flow = 'n';
1913 
1914 	if (co->index >= STM32_MAX_PORTS)
1915 		return -ENODEV;
1916 
1917 	stm32port = &stm32_ports[co->index];
1918 
1919 	/*
1920 	 * This driver does not support early console initialization
1921 	 * (use ARM early printk support instead), so we only expect
1922 	 * this to be called during uart port registration, once the
1923 	 * driver has been probed and the port has been mapped.
1924 	 */
1925 	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
1926 		return -ENXIO;
1927 
1928 	if (options)
1929 		uart_parse_options(options, &baud, &parity, &bits, &flow);
1930 
1931 	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
1932 }
1933 
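/*
 * The console is selected on the kernel command line as
 * console=<name><index>[,options], e.g. console=ttySTM0,115200n8
 * (assuming STM32_SERIAL_NAME expands to "ttySTM").
 */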
1934 static struct console stm32_console = {
1935 	.name		= STM32_SERIAL_NAME,
1936 	.device		= uart_console_device,
1937 	.write		= stm32_usart_console_write,
1938 	.setup		= stm32_usart_console_setup,
1939 	.flags		= CON_PRINTBUFFER,
1940 	.index		= -1,
1941 	.data		= &stm32_usart_driver,
1942 };
1943 
1944 #define STM32_SERIAL_CONSOLE (&stm32_console)
1945 
1946 #else
1947 #define STM32_SERIAL_CONSOLE NULL
1948 #endif /* CONFIG_SERIAL_STM32_CONSOLE */
1949 
1950 #ifdef CONFIG_SERIAL_EARLYCON
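/*
 * Early console output: poll TXE without a timeout (clocks and irqs are
 * not managed this early) and write the character to TDR, using the
 * register layout stashed in port->private_data by the setup hooks below.
 */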
1951 static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1952 {
1953 	struct stm32_usart_info *info = port->private_data;
1954 
1955 	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
1956 		cpu_relax();
1957 
1958 	writel_relaxed(ch, port->membase + info->ofs.tdr);
1959 }
1960 
1961 static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
1962 {
1963 	struct earlycon_device *device = console->data;
1964 	struct uart_port *port = &device->port;
1965 
1966 	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
1967 }
1968 
1969 static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
1970 {
1971 	if (!(device->port.membase || device->port.iobase))
1972 		return -ENODEV;
1973 	device->port.private_data = &stm32h7_info;
1974 	device->con->write = early_stm32_serial_write;
1975 	return 0;
1976 }
1977 
1978 static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
1979 {
1980 	if (!(device->port.membase || device->port.iobase))
1981 		return -ENODEV;
1982 	device->port.private_data = &stm32f7_info;
1983 	device->con->write = early_stm32_serial_write;
1984 	return 0;
1985 }
1986 
1987 static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
1988 {
1989 	if (!(device->port.membase || device->port.iobase))
1990 		return -ENODEV;
1991 	device->port.private_data = &stm32f4_info;
1992 	device->con->write = early_stm32_serial_write;
1993 	return 0;
1994 }
1995 
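/*
 * Earlycon matches for the supported compatibles: with /chosen/stdout-path
 * pointing at the uart node, passing just "earlycon" on the command line
 * selects the corresponding setup hook.
 */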
1996 OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
1997 OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
1998 OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
1999 #endif /* CONFIG_SERIAL_EARLYCON */
2000 
2001 static struct uart_driver stm32_usart_driver = {
2002 	.driver_name	= DRIVER_NAME,
2003 	.dev_name	= STM32_SERIAL_NAME,
2004 	.major		= 0,
2005 	.minor		= 0,
2006 	.nr		= STM32_MAX_PORTS,
2007 	.cons		= STM32_SERIAL_CONSOLE,
2008 };
2009 
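/*
 * Arm or disarm wake-up from low-power mode: toggle UESM in CR1 and WUFIE
 * in CR3 together with the mctrl gpio wake irqs. When arming, DMA
 * reception is paused and any data pending in the DMA buffer or the RX
 * FIFO is pushed to the tty layer; when disarming, DMA reception is
 * restarted.
 */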
2010 static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
2011 						       bool enable)
2012 {
2013 	struct stm32_port *stm32_port = to_stm32_port(port);
2014 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
2015 	struct tty_port *tport = &port->state->port;
2016 	int ret;
2017 	unsigned int size = 0;
2018 	unsigned long flags;
2019 
2020 	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
2021 		return 0;
2022 
2023 	/*
2024 	 * If "enable" is set, enable low-power wake-up and the wake-up irq;
2025 	 * otherwise disable both.
2026 	 */
2027 	if (enable) {
2028 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
2029 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
2030 		mctrl_gpio_enable_irq_wake(stm32_port->gpios);
2031 
2032 		/*
2033 		 * When DMA is used for reception, it must be disabled before
2034 		 * entering low-power mode and re-enabled when exiting from
2035 		 * low-power mode.
2036 		 */
2037 		if (stm32_port->rx_ch) {
2038 			spin_lock_irqsave(&port->lock, flags);
2039 			/* Poll data from DMA RX buffer if any */
2040 			if (!stm32_usart_rx_dma_pause(stm32_port))
2041 				size += stm32_usart_receive_chars(port, true);
2042 			stm32_usart_rx_dma_terminate(stm32_port);
2043 			uart_unlock_and_check_sysrq_irqrestore(port, flags);
2044 			if (size)
2045 				tty_flip_buffer_push(tport);
2046 		}
2047 
2048 		/* Poll data from RX FIFO if any */
2049 		stm32_usart_receive_chars(port, false);
2050 	} else {
2051 		if (stm32_port->rx_ch) {
2052 			ret = stm32_usart_rx_dma_start_or_resume(port);
2053 			if (ret)
2054 				return ret;
2055 		}
2056 		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
2057 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
2058 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
2059 	}
2060 
2061 	return 0;
2062 }
2063 
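/*
 * System suspend: suspend the uart port, arm wake-up when the device may
 * wake the system or sits in a wake-up path, then select the pinctrl idle
 * or sleep state unless the console has to stay alive
 * ("no_console_suspend").
 */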
2064 static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
2065 {
2066 	struct uart_port *port = dev_get_drvdata(dev);
2067 	int ret;
2068 
2069 	uart_suspend_port(&stm32_usart_driver, port);
2070 
2071 	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2072 		ret = stm32_usart_serial_en_wakeup(port, true);
2073 		if (ret)
2074 			return ret;
2075 	}
2076 
2077 	/*
2078 	 * When "no_console_suspend" is enabled, keep the pinctrl default state
2079 	 * and rely on the bootloader stage to restore it upon resume.
2080 	 * Otherwise, apply the idle or sleep state depending on wakeup
2081 	 * capabilities.
2082 	 */
2083 	if (console_suspend_enabled || !uart_console(port)) {
2084 		if (device_may_wakeup(dev) || device_wakeup_path(dev))
2085 			pinctrl_pm_select_idle_state(dev);
2086 		else
2087 			pinctrl_pm_select_sleep_state(dev);
2088 	}
2089 
2090 	return 0;
2091 }
2092 
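/*
 * System resume: restore the default pinctrl state, disarm wake-up if it
 * was armed during suspend and resume the uart port.
 */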
2093 static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
2094 {
2095 	struct uart_port *port = dev_get_drvdata(dev);
2096 	int ret;
2097 
2098 	pinctrl_pm_select_default_state(dev);
2099 
2100 	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2101 		ret = stm32_usart_serial_en_wakeup(port, false);
2102 		if (ret)
2103 			return ret;
2104 	}
2105 
2106 	return uart_resume_port(&stm32_usart_driver, port);
2107 }
2108 
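/* Runtime PM simply gates and ungates the usart kernel clock. */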
2109 static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
2110 {
2111 	struct uart_port *port = dev_get_drvdata(dev);
2112 	struct stm32_port *stm32port = container_of(port,
2113 			struct stm32_port, port);
2114 
2115 	clk_disable_unprepare(stm32port->clk);
2116 
2117 	return 0;
2118 }
2119 
2120 static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
2121 {
2122 	struct uart_port *port = dev_get_drvdata(dev);
2123 	struct stm32_port *stm32port = container_of(port,
2124 			struct stm32_port, port);
2125 
2126 	return clk_prepare_enable(stm32port->clk);
2127 }
2128 
2129 static const struct dev_pm_ops stm32_serial_pm_ops = {
2130 	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
2131 			   stm32_usart_runtime_resume, NULL)
2132 	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
2133 				stm32_usart_serial_resume)
2134 };
2135 
2136 static struct platform_driver stm32_serial_driver = {
2137 	.probe		= stm32_usart_serial_probe,
2138 	.remove		= stm32_usart_serial_remove,
2139 	.driver	= {
2140 		.name	= DRIVER_NAME,
2141 		.pm	= &stm32_serial_pm_ops,
2142 		.of_match_table = of_match_ptr(stm32_match),
2143 	},
2144 };
2145 
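/*
 * Register the uart core driver first, then the platform driver, and
 * unregister the former if the latter fails.
 */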
2146 static int __init stm32_usart_init(void)
2147 {
2148 	static char banner[] __initdata = "STM32 USART driver initialized";
2149 	int ret;
2150 
2151 	pr_info("%s\n", banner);
2152 
2153 	ret = uart_register_driver(&stm32_usart_driver);
2154 	if (ret)
2155 		return ret;
2156 
2157 	ret = platform_driver_register(&stm32_serial_driver);
2158 	if (ret)
2159 		uart_unregister_driver(&stm32_usart_driver);
2160 
2161 	return ret;
2162 }
2163 
2164 static void __exit stm32_usart_exit(void)
2165 {
2166 	platform_driver_unregister(&stm32_serial_driver);
2167 	uart_unregister_driver(&stm32_usart_driver);
2168 }
2169 
2170 module_init(stm32_usart_init);
2171 module_exit(stm32_usart_exit);
2172 
2173 MODULE_ALIAS("platform:" DRIVER_NAME);
2174 MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
2175 MODULE_LICENSE("GPL v2");
2176