xref: /openbmc/linux/drivers/tty/serial/stm32-usart.c (revision 12e9459d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Maxime Coquelin 2015
4  * Copyright (C) STMicroelectronics SA 2017
5  * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
6  *	     Gerald Baeza <gerald.baeza@foss.st.com>
7  *	     Erwan Le Ray <erwan.leray@foss.st.com>
8  *
9  * Inspired by st-asc.c from STMicroelectronics (c)
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/console.h>
14 #include <linux/delay.h>
15 #include <linux/dma-direction.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_platform.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/serial_core.h>
29 #include <linux/serial.h>
30 #include <linux/spinlock.h>
31 #include <linux/sysrq.h>
32 #include <linux/tty_flip.h>
33 #include <linux/tty.h>
34 
35 #include "serial_mctrl_gpio.h"
36 #include "stm32-usart.h"
37 
38 
39 /* Register offsets and per-variant configuration (UNDEF_REG: register not available) */
40 static struct stm32_usart_info __maybe_unused stm32f4_info = {
41 	.ofs = {
42 		.isr	= 0x00,
43 		.rdr	= 0x04,
44 		.tdr	= 0x04,
45 		.brr	= 0x08,
46 		.cr1	= 0x0c,
47 		.cr2	= 0x10,
48 		.cr3	= 0x14,
49 		.gtpr	= 0x18,
50 		.rtor	= UNDEF_REG,
51 		.rqr	= UNDEF_REG,
52 		.icr	= UNDEF_REG,
53 	},
54 	.cfg = {
55 		.uart_enable_bit = 13,
56 		.has_7bits_data = false,
57 		.fifosize = 1,
58 	}
59 };
60 
61 static struct stm32_usart_info __maybe_unused stm32f7_info = {
62 	.ofs = {
63 		.cr1	= 0x00,
64 		.cr2	= 0x04,
65 		.cr3	= 0x08,
66 		.brr	= 0x0c,
67 		.gtpr	= 0x10,
68 		.rtor	= 0x14,
69 		.rqr	= 0x18,
70 		.isr	= 0x1c,
71 		.icr	= 0x20,
72 		.rdr	= 0x24,
73 		.tdr	= 0x28,
74 	},
75 	.cfg = {
76 		.uart_enable_bit = 0,
77 		.has_7bits_data = true,
78 		.has_swap = true,
79 		.fifosize = 1,
80 	}
81 };
82 
83 static struct stm32_usart_info __maybe_unused stm32h7_info = {
84 	.ofs = {
85 		.cr1	= 0x00,
86 		.cr2	= 0x04,
87 		.cr3	= 0x08,
88 		.brr	= 0x0c,
89 		.gtpr	= 0x10,
90 		.rtor	= 0x14,
91 		.rqr	= 0x18,
92 		.isr	= 0x1c,
93 		.icr	= 0x20,
94 		.rdr	= 0x24,
95 		.tdr	= 0x28,
96 	},
97 	.cfg = {
98 		.uart_enable_bit = 0,
99 		.has_7bits_data = true,
100 		.has_swap = true,
101 		.has_wakeup = true,
102 		.has_fifo = true,
103 		.fifosize = 16,
104 	}
105 };
106 
107 static void stm32_usart_stop_tx(struct uart_port *port);
108 static void stm32_usart_transmit_chars(struct uart_port *port);
109 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
110 
111 static inline struct stm32_port *to_stm32_port(struct uart_port *port)
112 {
113 	return container_of(port, struct stm32_port, port);
114 }
115 
116 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
117 {
118 	u32 val;
119 
120 	val = readl_relaxed(port->membase + reg);
121 	val |= bits;
122 	writel_relaxed(val, port->membase + reg);
123 }
124 
125 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
126 {
127 	u32 val;
128 
129 	val = readl_relaxed(port->membase + reg);
130 	val &= ~bits;
131 	writel_relaxed(val, port->membase + reg);
132 }
133 
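/*
 * The TC (transmission complete) flag is checked rather than TXE, so
 * TIOCSER_TEMT is only reported once both the TDR/FIFO and the shift
 * register are empty.
 */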
134 static unsigned int stm32_usart_tx_empty(struct uart_port *port)
135 {
136 	struct stm32_port *stm32_port = to_stm32_port(port);
137 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
138 
139 	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
140 		return TIOCSER_TEMT;
141 
142 	return 0;
143 }
144 
145 static void stm32_usart_rs485_rts_enable(struct uart_port *port)
146 {
147 	struct stm32_port *stm32_port = to_stm32_port(port);
148 	struct serial_rs485 *rs485conf = &port->rs485;
149 
150 	if (stm32_port->hw_flow_control ||
151 	    !(rs485conf->flags & SER_RS485_ENABLED))
152 		return;
153 
154 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
155 		mctrl_gpio_set(stm32_port->gpios,
156 			       stm32_port->port.mctrl | TIOCM_RTS);
157 	} else {
158 		mctrl_gpio_set(stm32_port->gpios,
159 			       stm32_port->port.mctrl & ~TIOCM_RTS);
160 	}
161 }
162 
163 static void stm32_usart_rs485_rts_disable(struct uart_port *port)
164 {
165 	struct stm32_port *stm32_port = to_stm32_port(port);
166 	struct serial_rs485 *rs485conf = &port->rs485;
167 
168 	if (stm32_port->hw_flow_control ||
169 	    !(rs485conf->flags & SER_RS485_ENABLED))
170 		return;
171 
172 	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
173 		mctrl_gpio_set(stm32_port->gpios,
174 			       stm32_port->port.mctrl & ~TIOCM_RTS);
175 	} else {
176 		mctrl_gpio_set(stm32_port->gpios,
177 			       stm32_port->port.mctrl | TIOCM_RTS);
178 	}
179 }
180 
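/*
 * Program the RS485 driver-enable assertion/de-assertion times (DEAT/DEDT)
 * in CR1 from the delays expressed in milliseconds. Each delay is converted
 * to sample times, i.e. delay * baud * oversampling (8 or 16) / 1000, and
 * clamped to the maximum the DEAT/DEDT fields can hold (e.g. 1 ms at
 * 115200 baud with 16x oversampling would need 1843 sample times and gets
 * clamped).
 */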
181 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
182 					 u32 delay_DDE, u32 baud)
183 {
184 	u32 rs485_deat_dedt;
185 	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
186 	bool over8;
187 
188 	*cr3 |= USART_CR3_DEM;
189 	over8 = *cr1 & USART_CR1_OVER8;
190 
191 	*cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
192 
193 	if (over8)
194 		rs485_deat_dedt = delay_ADE * baud * 8;
195 	else
196 		rs485_deat_dedt = delay_ADE * baud * 16;
197 
198 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
199 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
200 			  rs485_deat_dedt_max : rs485_deat_dedt;
201 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
202 			   USART_CR1_DEAT_MASK;
203 	*cr1 |= rs485_deat_dedt;
204 
205 	if (over8)
206 		rs485_deat_dedt = delay_DDE * baud * 8;
207 	else
208 		rs485_deat_dedt = delay_DDE * baud * 16;
209 
210 	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
211 	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
212 			  rs485_deat_dedt_max : rs485_deat_dedt;
213 	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
214 			   USART_CR1_DEDT_MASK;
215 	*cr1 |= rs485_deat_dedt;
216 }
217 
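/*
 * rs485_config() callback: the current baud rate is recovered from BRR
 * (accounting for 8x oversampling), the DEAT/DEDT timings and the DE
 * polarity are programmed, and the USART is disabled around the update.
 */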
218 static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
219 				    struct serial_rs485 *rs485conf)
220 {
221 	struct stm32_port *stm32_port = to_stm32_port(port);
222 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
223 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
224 	u32 usartdiv, baud, cr1, cr3;
225 	bool over8;
226 
227 	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
228 
229 	if (rs485conf->flags & SER_RS485_ENABLED) {
230 		cr1 = readl_relaxed(port->membase + ofs->cr1);
231 		cr3 = readl_relaxed(port->membase + ofs->cr3);
232 		usartdiv = readl_relaxed(port->membase + ofs->brr);
233 		usartdiv = usartdiv & GENMASK(15, 0);
234 		over8 = cr1 & USART_CR1_OVER8;
235 
236 		if (over8)
237 			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
238 				   << USART_BRR_04_R_SHIFT;
239 
240 		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
241 		stm32_usart_config_reg_rs485(&cr1, &cr3,
242 					     rs485conf->delay_rts_before_send,
243 					     rs485conf->delay_rts_after_send,
244 					     baud);
245 
246 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
247 			cr3 &= ~USART_CR3_DEP;
248 		else
249 			cr3 |= USART_CR3_DEP;
250 
251 		writel_relaxed(cr3, port->membase + ofs->cr3);
252 		writel_relaxed(cr1, port->membase + ofs->cr1);
253 
254 		if (!port->rs485_rx_during_tx_gpio)
255 			rs485conf->flags |= SER_RS485_RX_DURING_TX;
256 
257 	} else {
258 		stm32_usart_clr_bits(port, ofs->cr3,
259 				     USART_CR3_DEM | USART_CR3_DEP);
260 		stm32_usart_clr_bits(port, ofs->cr1,
261 				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
262 	}
263 
264 	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
265 
266 	/* Adjust RTS polarity in case it's driven in software */
267 	if (stm32_usart_tx_empty(port))
268 		stm32_usart_rs485_rts_disable(port);
269 	else
270 		stm32_usart_rs485_rts_enable(port);
271 
272 	return 0;
273 }
274 
275 static int stm32_usart_init_rs485(struct uart_port *port,
276 				  struct platform_device *pdev)
277 {
278 	struct serial_rs485 *rs485conf = &port->rs485;
279 
280 	rs485conf->flags = 0;
281 	rs485conf->delay_rts_before_send = 0;
282 	rs485conf->delay_rts_after_send = 0;
283 
284 	if (!pdev->dev.of_node)
285 		return -ENODEV;
286 
287 	return uart_get_rs485_mode(port);
288 }
289 
290 static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
291 {
292 	return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
293 }
294 
295 static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
296 {
297 	dmaengine_terminate_async(stm32_port->rx_ch);
298 	stm32_port->rx_dma_busy = false;
299 }
300 
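/*
 * Common pause/resume helper for the RX and TX DMA channels: it only acts
 * when the channel is started and its transfer is in the expected state,
 * and terminates the channel if the dmaengine operation fails.
 */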
301 static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
302 					struct dma_chan *chan,
303 					enum dma_status expected_status,
304 					int dmaengine_pause_or_resume(struct dma_chan *),
305 					bool stm32_usart_xx_dma_started(struct stm32_port *),
306 					void stm32_usart_xx_dma_terminate(struct stm32_port *))
307 {
308 	struct uart_port *port = &stm32_port->port;
309 	enum dma_status dma_status;
310 	int ret;
311 
312 	if (!stm32_usart_xx_dma_started(stm32_port))
313 		return -EPERM;
314 
315 	dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
316 	if (dma_status != expected_status)
317 		return -EAGAIN;
318 
319 	ret = dmaengine_pause_or_resume(chan);
320 	if (ret) {
321 		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
322 		stm32_usart_xx_dma_terminate(stm32_port);
323 	}
324 	return ret;
325 }
326 
327 static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
328 {
329 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
330 					    DMA_IN_PROGRESS, dmaengine_pause,
331 					    stm32_usart_rx_dma_started,
332 					    stm32_usart_rx_dma_terminate);
333 }
334 
335 static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
336 {
337 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
338 					    DMA_PAUSED, dmaengine_resume,
339 					    stm32_usart_rx_dma_started,
340 					    stm32_usart_rx_dma_terminate);
341 }
342 
343 /* Return true when data is pending (in pio mode), and false when no data is pending. */
344 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
345 {
346 	struct stm32_port *stm32_port = to_stm32_port(port);
347 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
348 
349 	*sr = readl_relaxed(port->membase + ofs->isr);
350 	/* Get pending characters in RDR or FIFO */
351 	if (*sr & USART_SR_RXNE) {
352 		/* Get all pending characters from the RDR or the FIFO when using interrupts */
353 		if (!stm32_usart_rx_dma_started(stm32_port))
354 			return true;
355 
356 		/* Handle only RX data errors when using DMA */
357 		if (*sr & USART_SR_ERR_MASK)
358 			return true;
359 	}
360 
361 	return false;
362 }
363 
364 static u8 stm32_usart_get_char_pio(struct uart_port *port)
365 {
366 	struct stm32_port *stm32_port = to_stm32_port(port);
367 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
368 	unsigned long c;
369 
370 	c = readl_relaxed(port->membase + ofs->rdr);
371 	/* Apply RDR data mask */
372 	c &= stm32_port->rdr_mask;
373 
374 	return c;
375 }
376 
377 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
378 {
379 	struct stm32_port *stm32_port = to_stm32_port(port);
380 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
381 	unsigned int size = 0;
382 	u32 sr;
383 	u8 c, flag;
384 
385 	while (stm32_usart_pending_rx_pio(port, &sr)) {
386 		sr |= USART_SR_DUMMY_RX;
387 		flag = TTY_NORMAL;
388 
389 		/*
390 		 * Status bits have to be cleared before reading the RDR:
391 		 * In FIFO mode, reading the RDR will pop the next data
392 		 * (if any) along with its status bits into the SR.
393 		 * Not doing so leads to misalignment between RDR and SR,
394 		 * and clears the status bits of the next rx data.
395 		 *
396 		 * Clear errors flags for stm32f7 and stm32h7 compatible
397 		 * devices. On stm32f4 compatible devices, the error bit is
398 		 * cleared by the sequence [read SR - read DR].
399 		 */
400 		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
401 			writel_relaxed(sr & USART_SR_ERR_MASK,
402 				       port->membase + ofs->icr);
403 
404 		c = stm32_usart_get_char_pio(port);
405 		port->icount.rx++;
406 		size++;
407 		if (sr & USART_SR_ERR_MASK) {
408 			if (sr & USART_SR_ORE) {
409 				port->icount.overrun++;
410 			} else if (sr & USART_SR_PE) {
411 				port->icount.parity++;
412 			} else if (sr & USART_SR_FE) {
413 				/* Break detection if character is null */
414 				if (!c) {
415 					port->icount.brk++;
416 					if (uart_handle_break(port))
417 						continue;
418 				} else {
419 					port->icount.frame++;
420 				}
421 			}
422 
423 			sr &= port->read_status_mask;
424 
425 			if (sr & USART_SR_PE) {
426 				flag = TTY_PARITY;
427 			} else if (sr & USART_SR_FE) {
428 				if (!c)
429 					flag = TTY_BREAK;
430 				else
431 					flag = TTY_FRAME;
432 			}
433 		}
434 
435 		if (uart_prepare_sysrq_char(port, c))
436 			continue;
437 		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
438 	}
439 
440 	return size;
441 }
442 
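/*
 * Copy dma_size bytes from the cyclic RX DMA buffer, starting at the last
 * read position tracked by last_res, into the tty flip buffer. The parity
 * bit is masked out when the word length is not 8 data bits.
 */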
443 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
444 {
445 	struct stm32_port *stm32_port = to_stm32_port(port);
446 	struct tty_port *ttyport = &stm32_port->port.state->port;
447 	unsigned char *dma_start;
448 	int dma_count, i;
449 
450 	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
451 
452 	/*
453 	 * Apply rdr_mask on the buffer in order to mask the parity bit.
454 	 * This loop is not needed in cs8 mode because the DMA copies only
455 	 * 8 bits and already ignores the parity bit.
456 	 */
457 	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
458 		for (i = 0; i < dma_size; i++)
459 			*(dma_start + i) &= stm32_port->rdr_mask;
460 
461 	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
462 	port->icount.rx += dma_count;
463 	if (dma_count != dma_size)
464 		port->icount.buf_overrun++;
465 	stm32_port->last_res -= dma_count;
466 	if (stm32_port->last_res == 0)
467 		stm32_port->last_res = RX_BUF_L;
468 }
469 
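/*
 * Push the data received since the previous call, using the DMA residue to
 * locate the hardware write pointer in the cyclic buffer. Two copies are
 * performed when the transfer has wrapped around the end of the buffer.
 */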
470 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
471 {
472 	struct stm32_port *stm32_port = to_stm32_port(port);
473 	unsigned int dma_size, size = 0;
474 
475 	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
476 	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
477 		/* Conditional first part: from last_res to end of DMA buffer */
478 		dma_size = stm32_port->last_res;
479 		stm32_usart_push_buffer_dma(port, dma_size);
480 		size = dma_size;
481 	}
482 
483 	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
484 	stm32_usart_push_buffer_dma(port, dma_size);
485 	size += dma_size;
486 
487 	return size;
488 }
489 
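/*
 * Top-level RX handler: drain the DMA buffer when RX DMA is active (or when
 * a flush is forced), switching temporarily to PIO to handle error flags,
 * otherwise read characters directly from the RDR/FIFO. Returns the number
 * of characters inserted into the tty buffer.
 */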
490 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
491 {
492 	struct stm32_port *stm32_port = to_stm32_port(port);
493 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
494 	enum dma_status rx_dma_status;
495 	u32 sr;
496 	unsigned int size = 0;
497 
498 	if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
499 		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
500 						    stm32_port->rx_ch->cookie,
501 						    &stm32_port->rx_dma_state);
502 		if (rx_dma_status == DMA_IN_PROGRESS ||
503 		    rx_dma_status == DMA_PAUSED) {
504 			/* Empty DMA buffer */
505 			size = stm32_usart_receive_chars_dma(port);
506 			sr = readl_relaxed(port->membase + ofs->isr);
507 			if (sr & USART_SR_ERR_MASK) {
508 				/* Disable DMA request line */
509 				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
510 
511 				/* Switch to PIO mode to handle the errors */
512 				size += stm32_usart_receive_chars_pio(port);
513 
514 				/* Switch back to DMA mode */
515 				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
516 			}
517 		} else {
518 			/* Disable RX DMA */
519 			stm32_usart_rx_dma_terminate(stm32_port);
520 			/* Fall back to interrupt mode */
521 			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
522 			size = stm32_usart_receive_chars_pio(port);
523 		}
524 	} else {
525 		size = stm32_usart_receive_chars_pio(port);
526 	}
527 
528 	return size;
529 }
530 
531 static void stm32_usart_rx_dma_complete(void *arg)
532 {
533 	struct uart_port *port = arg;
534 	struct tty_port *tport = &port->state->port;
535 	unsigned int size;
536 	unsigned long flags;
537 
538 	spin_lock_irqsave(&port->lock, flags);
539 	size = stm32_usart_receive_chars(port, false);
540 	uart_unlock_and_check_sysrq_irqrestore(port, flags);
541 	if (size)
542 		tty_flip_buffer_push(tport);
543 }
544 
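/*
 * (Re)start RX DMA: resume a paused cyclic transfer when possible, otherwise
 * prepare, submit and issue a new cyclic transaction covering the whole RX
 * buffer. Nothing is done while the port is throttled.
 */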
545 static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
546 {
547 	struct stm32_port *stm32_port = to_stm32_port(port);
548 	struct dma_async_tx_descriptor *desc;
549 	enum dma_status rx_dma_status;
550 	int ret;
551 
552 	if (stm32_port->throttled)
553 		return 0;
554 
555 	if (stm32_port->rx_dma_busy) {
556 		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
557 						    stm32_port->rx_ch->cookie,
558 						    NULL);
559 		if (rx_dma_status == DMA_IN_PROGRESS)
560 			return 0;
561 
562 		if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
563 			return 0;
564 
565 		dev_err(port->dev, "DMA failed: status error.\n");
566 		stm32_usart_rx_dma_terminate(stm32_port);
567 	}
568 
569 	stm32_port->rx_dma_busy = true;
570 
571 	stm32_port->last_res = RX_BUF_L;
572 	/* Prepare a DMA cyclic transaction */
573 	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
574 					 stm32_port->rx_dma_buf,
575 					 RX_BUF_L, RX_BUF_P,
576 					 DMA_DEV_TO_MEM,
577 					 DMA_PREP_INTERRUPT);
578 	if (!desc) {
579 		dev_err(port->dev, "rx dma prep cyclic failed\n");
580 		stm32_port->rx_dma_busy = false;
581 		return -ENODEV;
582 	}
583 
584 	desc->callback = stm32_usart_rx_dma_complete;
585 	desc->callback_param = port;
586 
587 	/* Push current DMA transaction in the pending queue */
588 	ret = dma_submit_error(dmaengine_submit(desc));
589 	if (ret) {
590 		dmaengine_terminate_sync(stm32_port->rx_ch);
591 		stm32_port->rx_dma_busy = false;
592 		return ret;
593 	}
594 
595 	/* Issue pending DMA requests */
596 	dma_async_issue_pending(stm32_port->rx_ch);
597 
598 	return 0;
599 }
600 
601 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
602 {
603 	dmaengine_terminate_async(stm32_port->tx_ch);
604 	stm32_port->tx_dma_busy = false;
605 }
606 
607 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
608 {
609 	/*
610 	 * We cannot use dmaengine_tx_status() to know the status of the DMA:
611 	 * it does not tell whether the "dma complete" callback of the DMA
612 	 * transaction has been called. So we prefer to use the "tx_dma_busy"
613 	 * flag to prevent two DMA transactions from running at the same
614 	 * time.
615 	 */
616 	return stm32_port->tx_dma_busy;
617 }
618 
619 static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
620 {
621 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
622 					    DMA_IN_PROGRESS, dmaengine_pause,
623 					    stm32_usart_tx_dma_started,
624 					    stm32_usart_tx_dma_terminate);
625 }
626 
627 static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
628 {
629 	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
630 					    DMA_PAUSED, dmaengine_resume,
631 					    stm32_usart_tx_dma_started,
632 					    stm32_usart_tx_dma_terminate);
633 }
634 
635 static void stm32_usart_tx_dma_complete(void *arg)
636 {
637 	struct uart_port *port = arg;
638 	struct stm32_port *stm32port = to_stm32_port(port);
639 	unsigned long flags;
640 
641 	stm32_usart_tx_dma_terminate(stm32port);
642 
643 	/* Let's see if we have pending data to send */
644 	spin_lock_irqsave(&port->lock, flags);
645 	stm32_usart_transmit_chars(port);
646 	spin_unlock_irqrestore(&port->lock, flags);
647 }
648 
649 static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
650 {
651 	struct stm32_port *stm32_port = to_stm32_port(port);
652 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
653 
654 	/*
655 	 * Enable the TX FIFO threshold irq when the FIFO is enabled,
656 	 * or the TX empty irq when the FIFO is disabled.
657 	 */
658 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
659 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
660 	else
661 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
662 }
663 
664 static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
665 {
666 	struct stm32_port *stm32_port = to_stm32_port(port);
667 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
668 
669 	stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
670 }
671 
672 static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
673 {
674 	struct stm32_port *stm32_port = to_stm32_port(port);
675 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
676 
677 	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
678 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
679 	else
680 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
681 }
682 
683 static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
684 {
685 	struct stm32_port *stm32_port = to_stm32_port(port);
686 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
687 
688 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
689 }
690 
691 static void stm32_usart_transmit_chars_pio(struct uart_port *port)
692 {
693 	struct stm32_port *stm32_port = to_stm32_port(port);
694 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
695 	struct circ_buf *xmit = &port->state->xmit;
696 
697 	while (!uart_circ_empty(xmit)) {
698 		/* Check that TDR is empty before filling FIFO */
699 		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
700 			break;
701 		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
702 		uart_xmit_advance(port, 1);
703 	}
704 
705 	/* rely on TXE irq (mask or unmask) for sending remaining data */
706 	if (uart_circ_empty(xmit))
707 		stm32_usart_tx_interrupt_disable(port);
708 	else
709 		stm32_usart_tx_interrupt_enable(port);
710 }
711 
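/*
 * Start a TX DMA transfer for the data pending in the circular buffer:
 * up to TX_BUF_L bytes are linearized into tx_buf, then a single slave
 * transaction is submitted. Any failure falls back to PIO transmission.
 */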
712 static void stm32_usart_transmit_chars_dma(struct uart_port *port)
713 {
714 	struct stm32_port *stm32port = to_stm32_port(port);
715 	struct circ_buf *xmit = &port->state->xmit;
716 	struct dma_async_tx_descriptor *desc = NULL;
717 	unsigned int count;
718 	int ret;
719 
720 	if (stm32_usart_tx_dma_started(stm32port)) {
721 		ret = stm32_usart_tx_dma_resume(stm32port);
722 		if (ret < 0 && ret != -EAGAIN)
723 			goto fallback_err;
724 		return;
725 	}
726 
727 	count = uart_circ_chars_pending(xmit);
728 
729 	if (count > TX_BUF_L)
730 		count = TX_BUF_L;
731 
732 	if (xmit->tail < xmit->head) {
733 		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
734 	} else {
735 		size_t one = UART_XMIT_SIZE - xmit->tail;
736 		size_t two;
737 
738 		if (one > count)
739 			one = count;
740 		two = count - one;
741 
742 		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
743 		if (two)
744 			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
745 	}
746 
747 	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
748 					   stm32port->tx_dma_buf,
749 					   count,
750 					   DMA_MEM_TO_DEV,
751 					   DMA_PREP_INTERRUPT);
752 
753 	if (!desc)
754 		goto fallback_err;
755 
756 	/*
757 	 * Set the "tx_dma_busy" flag. It is released when
758 	 * dmaengine_terminate_async() is called, and prevents
759 	 * transmit_chars_dma from starting another DMA transaction
760 	 * while the callback of the previous one has not run yet.
761 	 */
762 	stm32port->tx_dma_busy = true;
763 
764 	desc->callback = stm32_usart_tx_dma_complete;
765 	desc->callback_param = port;
766 
767 	/* Push current DMA TX transaction in the pending queue */
768 	/* DMA not yet started, safe to free resources */
769 	ret = dma_submit_error(dmaengine_submit(desc));
770 	if (ret) {
771 		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
772 		stm32_usart_tx_dma_terminate(stm32port);
773 		goto fallback_err;
774 	}
775 
776 	/* Issue pending DMA TX requests */
777 	dma_async_issue_pending(stm32port->tx_ch);
778 
779 	uart_xmit_advance(port, count);
780 
781 	return;
782 
783 fallback_err:
784 	stm32_usart_transmit_chars_pio(port);
785 }
786 
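/*
 * Main transmit path, called with the port lock held: it sends the
 * high-priority x_char first, drives RS485 RTS when it is handled in
 * software, clears the TC flag and then hands the data over to the DMA or
 * PIO transmit helper.
 */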
787 static void stm32_usart_transmit_chars(struct uart_port *port)
788 {
789 	struct stm32_port *stm32_port = to_stm32_port(port);
790 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
791 	struct circ_buf *xmit = &port->state->xmit;
792 	u32 isr;
793 	int ret;
794 
795 	if (!stm32_port->hw_flow_control &&
796 	    port->rs485.flags & SER_RS485_ENABLED &&
797 	    (port->x_char ||
798 	     !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
799 		stm32_usart_tc_interrupt_disable(port);
800 		stm32_usart_rs485_rts_enable(port);
801 	}
802 
803 	if (port->x_char) {
804 		/* dma terminate may have been called in case of dma pause failure */
805 		stm32_usart_tx_dma_pause(stm32_port);
806 
807 		/* Check that TDR is empty before filling FIFO */
808 		ret =
809 		readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
810 						  isr,
811 						  (isr & USART_SR_TXE),
812 						  10, 1000);
813 		if (ret)
814 			dev_warn(port->dev, "1 character may be erased\n");
815 
816 		writel_relaxed(port->x_char, port->membase + ofs->tdr);
817 		port->x_char = 0;
818 		port->icount.tx++;
819 
820 		/* dma terminate may have been called in case of dma resume failure */
821 		stm32_usart_tx_dma_resume(stm32_port);
822 		return;
823 	}
824 
825 	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
826 		stm32_usart_tx_interrupt_disable(port);
827 		return;
828 	}
829 
830 	if (ofs->icr == UNDEF_REG)
831 		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
832 	else
833 		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
834 
835 	if (stm32_port->tx_ch)
836 		stm32_usart_transmit_chars_dma(port);
837 	else
838 		stm32_usart_transmit_chars_pio(port);
839 
840 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
841 		uart_write_wakeup(port);
842 
843 	if (uart_circ_empty(xmit)) {
844 		stm32_usart_tx_interrupt_disable(port);
845 		if (!stm32_port->hw_flow_control &&
846 		    port->rs485.flags & SER_RS485_ENABLED) {
847 			stm32_usart_tc_interrupt_enable(port);
848 		}
849 	}
850 }
851 
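/*
 * Interrupt handler: acknowledge transmission complete, receiver timeout and
 * wakeup events, then service RX (PIO, or draining of the DMA buffer on
 * error/timeout) and PIO TX according to the ISR flags. DMA TX completion is
 * handled by its own callback instead.
 */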
852 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
853 {
854 	struct uart_port *port = ptr;
855 	struct tty_port *tport = &port->state->port;
856 	struct stm32_port *stm32_port = to_stm32_port(port);
857 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
858 	u32 sr;
859 	unsigned int size;
860 	irqreturn_t ret = IRQ_NONE;
861 
862 	sr = readl_relaxed(port->membase + ofs->isr);
863 
864 	if (!stm32_port->hw_flow_control &&
865 	    port->rs485.flags & SER_RS485_ENABLED &&
866 	    (sr & USART_SR_TC)) {
867 		stm32_usart_tc_interrupt_disable(port);
868 		stm32_usart_rs485_rts_disable(port);
869 		ret = IRQ_HANDLED;
870 	}
871 
872 	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
873 		writel_relaxed(USART_ICR_RTOCF,
874 			       port->membase + ofs->icr);
875 		ret = IRQ_HANDLED;
876 	}
877 
878 	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
879 		/* Clear wake up flag and disable wake up interrupt */
880 		writel_relaxed(USART_ICR_WUCF,
881 			       port->membase + ofs->icr);
882 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
883 		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
884 			pm_wakeup_event(tport->tty->dev, 0);
885 		ret = IRQ_HANDLED;
886 	}
887 
888 	/*
889 	 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA request
890 	 * line has been masked by HW and RX data are stacking up in the FIFO.
891 	 */
892 	if (!stm32_port->throttled) {
893 		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
894 		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
895 			spin_lock(&port->lock);
896 			size = stm32_usart_receive_chars(port, false);
897 			uart_unlock_and_check_sysrq(port);
898 			if (size)
899 				tty_flip_buffer_push(tport);
900 			ret = IRQ_HANDLED;
901 		}
902 	}
903 
904 	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
905 		spin_lock(&port->lock);
906 		stm32_usart_transmit_chars(port);
907 		spin_unlock(&port->lock);
908 		ret = IRQ_HANDLED;
909 	}
910 
911 	/* Receiver timeout irq for DMA RX */
912 	if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
913 		spin_lock(&port->lock);
914 		size = stm32_usart_receive_chars(port, false);
915 		uart_unlock_and_check_sysrq(port);
916 		if (size)
917 			tty_flip_buffer_push(tport);
918 		ret = IRQ_HANDLED;
919 	}
920 
921 	return ret;
922 }
923 
924 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
925 {
926 	struct stm32_port *stm32_port = to_stm32_port(port);
927 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
928 
929 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
930 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
931 	else
932 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
933 
934 	mctrl_gpio_set(stm32_port->gpios, mctrl);
935 }
936 
937 static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
938 {
939 	struct stm32_port *stm32_port = to_stm32_port(port);
940 	unsigned int ret;
941 
942 	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
943 	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
944 
945 	return mctrl_gpio_get(stm32_port->gpios, &ret);
946 }
947 
948 static void stm32_usart_enable_ms(struct uart_port *port)
949 {
950 	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
951 }
952 
953 static void stm32_usart_disable_ms(struct uart_port *port)
954 {
955 	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
956 }
957 
958 /* Transmit stop */
959 static void stm32_usart_stop_tx(struct uart_port *port)
960 {
961 	struct stm32_port *stm32_port = to_stm32_port(port);
962 
963 	stm32_usart_tx_interrupt_disable(port);
964 
965 	/* dma terminate may have been called in case of dma pause failure */
966 	stm32_usart_tx_dma_pause(stm32_port);
967 
968 	stm32_usart_rs485_rts_disable(port);
969 }
970 
971 /* There are probably characters waiting to be transmitted. */
972 static void stm32_usart_start_tx(struct uart_port *port)
973 {
974 	struct circ_buf *xmit = &port->state->xmit;
975 
976 	if (uart_circ_empty(xmit) && !port->x_char) {
977 		stm32_usart_rs485_rts_disable(port);
978 		return;
979 	}
980 
981 	stm32_usart_rs485_rts_enable(port);
982 
983 	stm32_usart_transmit_chars(port);
984 }
985 
986 /* Flush the transmit buffer. */
987 static void stm32_usart_flush_buffer(struct uart_port *port)
988 {
989 	struct stm32_port *stm32_port = to_stm32_port(port);
990 
991 	if (stm32_port->tx_ch)
992 		stm32_usart_tx_dma_terminate(stm32_port);
993 }
994 
995 /* Throttle the remote when input buffer is about to overflow. */
996 static void stm32_usart_throttle(struct uart_port *port)
997 {
998 	struct stm32_port *stm32_port = to_stm32_port(port);
999 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1000 	unsigned long flags;
1001 
1002 	spin_lock_irqsave(&port->lock, flags);
1003 
1004 	/*
1005 	 * Pause DMA transfer, so the RX data gets queued into the FIFO.
1006 	 * Hardware flow control is triggered when RX FIFO is full.
1007 	 */
1008 	stm32_usart_rx_dma_pause(stm32_port);
1009 
1010 	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1011 	if (stm32_port->cr3_irq)
1012 		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1013 
1014 	stm32_port->throttled = true;
1015 	spin_unlock_irqrestore(&port->lock, flags);
1016 }
1017 
1018 /* Unthrottle the remote, the input buffer can now accept data. */
1019 static void stm32_usart_unthrottle(struct uart_port *port)
1020 {
1021 	struct stm32_port *stm32_port = to_stm32_port(port);
1022 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1023 	unsigned long flags;
1024 
1025 	spin_lock_irqsave(&port->lock, flags);
1026 	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1027 	if (stm32_port->cr3_irq)
1028 		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
1029 
1030 	stm32_port->throttled = false;
1031 
1032 	/*
1033 	 * Switch back to DMA mode (resume DMA).
1034 	 * Hardware flow control is stopped when FIFO is not full any more.
1035 	 */
1036 	if (stm32_port->rx_ch)
1037 		stm32_usart_rx_dma_start_or_resume(port);
1038 
1039 	spin_unlock_irqrestore(&port->lock, flags);
1040 }
1041 
1042 /* Receive stop */
1043 static void stm32_usart_stop_rx(struct uart_port *port)
1044 {
1045 	struct stm32_port *stm32_port = to_stm32_port(port);
1046 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1047 
1048 	/* Disable DMA request line. */
1049 	stm32_usart_rx_dma_pause(stm32_port);
1050 
1051 	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1052 	if (stm32_port->cr3_irq)
1053 		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1054 }
1055 
1056 /* Handle breaks - ignored by us */
1057 static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
1058 {
1059 }
1060 
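/*
 * Port startup: request the USART interrupt, apply the optional RX/TX pin
 * swap, flush the RX FIFO, start RX DMA when a channel is available, and
 * finally enable the receiver together with the USART.
 */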
1061 static int stm32_usart_startup(struct uart_port *port)
1062 {
1063 	struct stm32_port *stm32_port = to_stm32_port(port);
1064 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1065 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1066 	const char *name = to_platform_device(port->dev)->name;
1067 	u32 val;
1068 	int ret;
1069 
1070 	ret = request_irq(port->irq, stm32_usart_interrupt,
1071 			  IRQF_NO_SUSPEND, name, port);
1072 	if (ret)
1073 		return ret;
1074 
1075 	if (stm32_port->swap) {
1076 		val = readl_relaxed(port->membase + ofs->cr2);
1077 		val |= USART_CR2_SWAP;
1078 		writel_relaxed(val, port->membase + ofs->cr2);
1079 	}
1080 	stm32_port->throttled = false;
1081 
1082 	/* RX FIFO Flush */
1083 	if (ofs->rqr != UNDEF_REG)
1084 		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
1085 
1086 	if (stm32_port->rx_ch) {
1087 		ret = stm32_usart_rx_dma_start_or_resume(port);
1088 		if (ret) {
1089 			free_irq(port->irq, port);
1090 			return ret;
1091 		}
1092 	}
1093 
1094 	/* RX enabling */
1095 	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
1096 	stm32_usart_set_bits(port, ofs->cr1, val);
1097 
1098 	return 0;
1099 }
1100 
1101 static void stm32_usart_shutdown(struct uart_port *port)
1102 {
1103 	struct stm32_port *stm32_port = to_stm32_port(port);
1104 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1105 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1106 	u32 val, isr;
1107 	int ret;
1108 
1109 	if (stm32_usart_tx_dma_started(stm32_port))
1110 		stm32_usart_tx_dma_terminate(stm32_port);
1111 
1112 	if (stm32_port->tx_ch)
1113 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1114 
1115 	/* Disable modem control interrupts */
1116 	stm32_usart_disable_ms(port);
1117 
1118 	val = USART_CR1_TXEIE | USART_CR1_TE;
1119 	val |= stm32_port->cr1_irq | USART_CR1_RE;
1120 	val |= BIT(cfg->uart_enable_bit);
1121 	if (stm32_port->fifoen)
1122 		val |= USART_CR1_FIFOEN;
1123 
1124 	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
1125 					 isr, (isr & USART_SR_TC),
1126 					 10, 100000);
1127 
1128 	/* Send the TC error message only when ISR_TC is not set */
1129 	if (ret)
1130 		dev_err(port->dev, "Transmission is not complete\n");
1131 
1132 	/* Disable RX DMA. */
1133 	if (stm32_port->rx_ch) {
1134 		stm32_usart_rx_dma_terminate(stm32_port);
1135 		dmaengine_synchronize(stm32_port->rx_ch);
1136 	}
1137 
1138 	/* flush RX & TX FIFO */
1139 	if (ofs->rqr != UNDEF_REG)
1140 		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1141 			       port->membase + ofs->rqr);
1142 
1143 	stm32_usart_clr_bits(port, ofs->cr1, val);
1144 
1145 	free_irq(port->irq, port);
1146 }
1147 
1148 static void stm32_usart_set_termios(struct uart_port *port,
1149 				    struct ktermios *termios,
1150 				    const struct ktermios *old)
1151 {
1152 	struct stm32_port *stm32_port = to_stm32_port(port);
1153 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1154 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1155 	struct serial_rs485 *rs485conf = &port->rs485;
1156 	unsigned int baud, bits;
1157 	u32 usartdiv, mantissa, fraction, oversampling;
1158 	tcflag_t cflag = termios->c_cflag;
1159 	u32 cr1, cr2, cr3, isr;
1160 	unsigned long flags;
1161 	int ret;
1162 
1163 	if (!stm32_port->hw_flow_control)
1164 		cflag &= ~CRTSCTS;
1165 
1166 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
1167 
1168 	spin_lock_irqsave(&port->lock, flags);
1169 
1170 	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1171 						isr,
1172 						(isr & USART_SR_TC),
1173 						10, 100000);
1174 
1175 	/* Send the TC error message only when ISR_TC is not set. */
1176 	if (ret)
1177 		dev_err(port->dev, "Transmission is not complete\n");
1178 
1179 	/* Stop serial port and reset value */
1180 	writel_relaxed(0, port->membase + ofs->cr1);
1181 
1182 	/* flush RX & TX FIFO */
1183 	if (ofs->rqr != UNDEF_REG)
1184 		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1185 			       port->membase + ofs->rqr);
1186 
1187 	cr1 = USART_CR1_TE | USART_CR1_RE;
1188 	if (stm32_port->fifoen)
1189 		cr1 |= USART_CR1_FIFOEN;
1190 	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;
1191 
1192 	/* Tx and RX FIFO configuration */
1193 	cr3 = readl_relaxed(port->membase + ofs->cr3);
1194 	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
1195 	if (stm32_port->fifoen) {
1196 		if (stm32_port->txftcfg >= 0)
1197 			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
1198 		if (stm32_port->rxftcfg >= 0)
1199 			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
1200 	}
1201 
1202 	if (cflag & CSTOPB)
1203 		cr2 |= USART_CR2_STOP_2B;
1204 
1205 	bits = tty_get_char_size(cflag);
1206 	stm32_port->rdr_mask = (BIT(bits) - 1);
1207 
1208 	if (cflag & PARENB) {
1209 		bits++;
1210 		cr1 |= USART_CR1_PCE;
1211 	}
1212 
1213 	/*
1214 	 * Word length configuration:
1215 	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
1216 	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
1217 	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
1218 	 * M0 and M1 already cleared by cr1 initialization.
1219 	 */
1220 	if (bits == 9) {
1221 		cr1 |= USART_CR1_M0;
1222 	} else if ((bits == 7) && cfg->has_7bits_data) {
1223 		cr1 |= USART_CR1_M1;
1224 	} else if (bits != 8) {
1225 		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
1226 			, bits);
1227 		cflag &= ~CSIZE;
1228 		cflag |= CS8;
1229 		termios->c_cflag = cflag;
1230 		bits = 8;
1231 		if (cflag & PARENB) {
1232 			bits++;
1233 			cr1 |= USART_CR1_M0;
1234 		}
1235 	}
1236 
1237 	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
1238 				       (stm32_port->fifoen &&
1239 					stm32_port->rxftcfg >= 0))) {
1240 		if (cflag & CSTOPB)
1241 			bits = bits + 3; /* 1 start bit + 2 stop bits */
1242 		else
1243 			bits = bits + 2; /* 1 start bit + 1 stop bit */
1244 
1245 		/* RX timeout irq to occur after last stop bit + bits */
1246 		stm32_port->cr1_irq = USART_CR1_RTOIE;
1247 		writel_relaxed(bits, port->membase + ofs->rtor);
1248 		cr2 |= USART_CR2_RTOEN;
1249 		/*
1250 		 * Enable the FIFO threshold irq in two cases: either when there is no DMA, or when
1251 		 * waking up over USART from low power, until the DMA gets re-enabled by resume.
1252 		 */
1253 		stm32_port->cr3_irq =  USART_CR3_RXFTIE;
1254 	}
1255 
1256 	cr1 |= stm32_port->cr1_irq;
1257 	cr3 |= stm32_port->cr3_irq;
1258 
1259 	if (cflag & PARODD)
1260 		cr1 |= USART_CR1_PS;
1261 
1262 	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1263 	if (cflag & CRTSCTS) {
1264 		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
1265 		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
1266 	}
1267 
1268 	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
1269 
1270 	/*
1271 	 * The USART supports 16 or 8 times oversampling.
1272 	 * By default we prefer 16 times oversampling, so that the receiver
1273 	 * has a better tolerance to clock deviations.
1274 	 * 8 times oversampling is only used to achieve higher speeds.
1275 	 */
1276 	if (usartdiv < 16) {
1277 		oversampling = 8;
1278 		cr1 |= USART_CR1_OVER8;
1279 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
1280 	} else {
1281 		oversampling = 16;
1282 		cr1 &= ~USART_CR1_OVER8;
1283 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
1284 	}
1285 
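	/*
	 * BRR = mantissa (usartdiv / oversampling) in the upper bits plus
	 * fraction (usartdiv % oversampling) in the lower bits. As an
	 * illustrative example, a 64 MHz kernel clock at 115200 baud gives
	 * usartdiv = 556, i.e. mantissa 34 and fraction 12 with 16x
	 * oversampling.
	 */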
1286 	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
1287 	fraction = usartdiv % oversampling;
1288 	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
1289 
1290 	uart_update_timeout(port, cflag, baud);
1291 
1292 	port->read_status_mask = USART_SR_ORE;
1293 	if (termios->c_iflag & INPCK)
1294 		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
1295 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1296 		port->read_status_mask |= USART_SR_FE;
1297 
1298 	/* Characters to ignore */
1299 	port->ignore_status_mask = 0;
1300 	if (termios->c_iflag & IGNPAR)
1301 		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
1302 	if (termios->c_iflag & IGNBRK) {
1303 		port->ignore_status_mask |= USART_SR_FE;
1304 		/*
1305 		 * If we're ignoring parity and break indicators,
1306 		 * ignore overruns too (for real raw support).
1307 		 */
1308 		if (termios->c_iflag & IGNPAR)
1309 			port->ignore_status_mask |= USART_SR_ORE;
1310 	}
1311 
1312 	/* Ignore all characters if CREAD is not set */
1313 	if ((termios->c_cflag & CREAD) == 0)
1314 		port->ignore_status_mask |= USART_SR_DUMMY_RX;
1315 
1316 	if (stm32_port->rx_ch) {
1317 		/*
1318 		 * Setup DMA to collect only valid data and enable error irqs.
1319 		 * This also enables break reception when using DMA.
1320 		 */
1321 		cr1 |= USART_CR1_PEIE;
1322 		cr3 |= USART_CR3_EIE;
1323 		cr3 |= USART_CR3_DMAR;
1324 		cr3 |= USART_CR3_DDRE;
1325 	}
1326 
1327 	if (stm32_port->tx_ch)
1328 		cr3 |= USART_CR3_DMAT;
1329 
1330 	if (rs485conf->flags & SER_RS485_ENABLED) {
1331 		stm32_usart_config_reg_rs485(&cr1, &cr3,
1332 					     rs485conf->delay_rts_before_send,
1333 					     rs485conf->delay_rts_after_send,
1334 					     baud);
1335 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
1336 			cr3 &= ~USART_CR3_DEP;
1337 			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
1338 		} else {
1339 			cr3 |= USART_CR3_DEP;
1340 			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1341 		}
1342 
1343 	} else {
1344 		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
1345 		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
1346 	}
1347 
1348 	/* Configure wake up from low power on start bit detection */
1349 	if (stm32_port->wakeup_src) {
1350 		cr3 &= ~USART_CR3_WUS_MASK;
1351 		cr3 |= USART_CR3_WUS_START_BIT;
1352 	}
1353 
1354 	writel_relaxed(cr3, port->membase + ofs->cr3);
1355 	writel_relaxed(cr2, port->membase + ofs->cr2);
1356 	writel_relaxed(cr1, port->membase + ofs->cr1);
1357 
1358 	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1359 	spin_unlock_irqrestore(&port->lock, flags);
1360 
1361 	/* Handle modem control interrupts */
1362 	if (UART_ENABLE_MS(port, termios->c_cflag))
1363 		stm32_usart_enable_ms(port);
1364 	else
1365 		stm32_usart_disable_ms(port);
1366 }
1367 
1368 static const char *stm32_usart_type(struct uart_port *port)
1369 {
1370 	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1371 }
1372 
1373 static void stm32_usart_release_port(struct uart_port *port)
1374 {
1375 }
1376 
1377 static int stm32_usart_request_port(struct uart_port *port)
1378 {
1379 	return 0;
1380 }
1381 
1382 static void stm32_usart_config_port(struct uart_port *port, int flags)
1383 {
1384 	if (flags & UART_CONFIG_TYPE)
1385 		port->type = PORT_STM32;
1386 }
1387 
1388 static int
1389 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
1390 {
1391 	/* No user changeable parameters */
1392 	return -EINVAL;
1393 }
1394 
1395 static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1396 			   unsigned int oldstate)
1397 {
1398 	struct stm32_port *stm32port = container_of(port,
1399 			struct stm32_port, port);
1400 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1401 	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1402 	unsigned long flags;
1403 
1404 	switch (state) {
1405 	case UART_PM_STATE_ON:
1406 		pm_runtime_get_sync(port->dev);
1407 		break;
1408 	case UART_PM_STATE_OFF:
1409 		spin_lock_irqsave(&port->lock, flags);
1410 		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1411 		spin_unlock_irqrestore(&port->lock, flags);
1412 		pm_runtime_put_sync(port->dev);
1413 		break;
1414 	}
1415 }
1416 
1417 #if defined(CONFIG_CONSOLE_POLL)
1418 
1419  /* Callbacks for character polling in debug context (i.e. KGDB). */
1420 static int stm32_usart_poll_init(struct uart_port *port)
1421 {
1422 	struct stm32_port *stm32_port = to_stm32_port(port);
1423 
1424 	return clk_prepare_enable(stm32_port->clk);
1425 }
1426 
1427 static int stm32_usart_poll_get_char(struct uart_port *port)
1428 {
1429 	struct stm32_port *stm32_port = to_stm32_port(port);
1430 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1431 
1432 	if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
1433 		return NO_POLL_CHAR;
1434 
1435 	return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
1436 }
1437 
1438 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
1439 {
1440 	stm32_usart_console_putchar(port, ch);
1441 }
1442 #endif /* CONFIG_CONSOLE_POLL */
1443 
1444 static const struct uart_ops stm32_uart_ops = {
1445 	.tx_empty	= stm32_usart_tx_empty,
1446 	.set_mctrl	= stm32_usart_set_mctrl,
1447 	.get_mctrl	= stm32_usart_get_mctrl,
1448 	.stop_tx	= stm32_usart_stop_tx,
1449 	.start_tx	= stm32_usart_start_tx,
1450 	.throttle	= stm32_usart_throttle,
1451 	.unthrottle	= stm32_usart_unthrottle,
1452 	.stop_rx	= stm32_usart_stop_rx,
1453 	.enable_ms	= stm32_usart_enable_ms,
1454 	.break_ctl	= stm32_usart_break_ctl,
1455 	.startup	= stm32_usart_startup,
1456 	.shutdown	= stm32_usart_shutdown,
1457 	.flush_buffer	= stm32_usart_flush_buffer,
1458 	.set_termios	= stm32_usart_set_termios,
1459 	.pm		= stm32_usart_pm,
1460 	.type		= stm32_usart_type,
1461 	.release_port	= stm32_usart_release_port,
1462 	.request_port	= stm32_usart_request_port,
1463 	.config_port	= stm32_usart_config_port,
1464 	.verify_port	= stm32_usart_verify_port,
1465 #if defined(CONFIG_CONSOLE_POLL)
1466 	.poll_init      = stm32_usart_poll_init,
1467 	.poll_get_char	= stm32_usart_poll_get_char,
1468 	.poll_put_char	= stm32_usart_poll_put_char,
1469 #endif /* CONFIG_CONSOLE_POLL */
1470 };
1471 
1472 /*
1473  * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
1474  * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
1475  * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
1476  * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
1477  */
1478 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
1479 
1480 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
1481 				  int *ftcfg)
1482 {
1483 	u32 bytes, i;
1484 
1485 	/* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
1486 	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1487 		bytes = 8;
1488 
1489 	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
1490 		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
1491 			break;
1492 	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1493 		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1494 
1495 	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
1496 		stm32h7_usart_fifo_thresh_cfg[i]);
1497 
1498 	/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
1499 	if (i)
1500 		*ftcfg = i - 1;
1501 	else
1502 		*ftcfg = -EINVAL;
1503 }
1504 
1505 static void stm32_usart_deinit_port(struct stm32_port *stm32port)
1506 {
1507 	clk_disable_unprepare(stm32port->clk);
1508 }
1509 
1510 static const struct serial_rs485 stm32_rs485_supported = {
1511 	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
1512 		 SER_RS485_RX_DURING_TX,
1513 	.delay_rts_before_send = 1,
1514 	.delay_rts_after_send = 1,
1515 };
1516 
1517 static int stm32_usart_init_port(struct stm32_port *stm32port,
1518 				 struct platform_device *pdev)
1519 {
1520 	struct uart_port *port = &stm32port->port;
1521 	struct resource *res;
1522 	int ret, irq;
1523 
1524 	irq = platform_get_irq(pdev, 0);
1525 	if (irq < 0)
1526 		return irq;
1527 
1528 	port->iotype	= UPIO_MEM;
1529 	port->flags	= UPF_BOOT_AUTOCONF;
1530 	port->ops	= &stm32_uart_ops;
1531 	port->dev	= &pdev->dev;
1532 	port->fifosize	= stm32port->info->cfg.fifosize;
1533 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
1534 	port->irq = irq;
1535 	port->rs485_config = stm32_usart_config_rs485;
1536 	port->rs485_supported = stm32_rs485_supported;
1537 
1538 	ret = stm32_usart_init_rs485(port, pdev);
1539 	if (ret)
1540 		return ret;
1541 
1542 	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
1543 		of_property_read_bool(pdev->dev.of_node, "wakeup-source");
1544 
1545 	stm32port->swap = stm32port->info->cfg.has_swap &&
1546 		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
1547 
1548 	stm32port->fifoen = stm32port->info->cfg.has_fifo;
1549 	if (stm32port->fifoen) {
1550 		stm32_usart_get_ftcfg(pdev, "rx-threshold",
1551 				      &stm32port->rxftcfg);
1552 		stm32_usart_get_ftcfg(pdev, "tx-threshold",
1553 				      &stm32port->txftcfg);
1554 	}
1555 
1556 	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1557 	if (IS_ERR(port->membase))
1558 		return PTR_ERR(port->membase);
1559 	port->mapbase = res->start;
1560 
1561 	spin_lock_init(&port->lock);
1562 
1563 	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
1564 	if (IS_ERR(stm32port->clk))
1565 		return PTR_ERR(stm32port->clk);
1566 
1567 	/* Ensure that clk rate is correct by enabling the clk */
1568 	ret = clk_prepare_enable(stm32port->clk);
1569 	if (ret)
1570 		return ret;
1571 
1572 	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
1573 	if (!stm32port->port.uartclk) {
1574 		ret = -EINVAL;
1575 		goto err_clk;
1576 	}
1577 
1578 	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
1579 	if (IS_ERR(stm32port->gpios)) {
1580 		ret = PTR_ERR(stm32port->gpios);
1581 		goto err_clk;
1582 	}
1583 
1584 	/*
1585 	 * CTS/RTS gpios and the "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
1586 	 * properties must not both be specified.
1587 	 */
1588 	if (stm32port->hw_flow_control) {
1589 		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
1590 		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
1591 			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
1592 			ret = -EINVAL;
1593 			goto err_clk;
1594 		}
1595 	}
1596 
1597 	return ret;
1598 
1599 err_clk:
1600 	clk_disable_unprepare(stm32port->clk);
1601 
1602 	return ret;
1603 }
1604 
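/*
 * Look up the stm32_ports[] entry matching the "serial" alias of the device
 * node and set its defaults: RXNE interrupt in CR1, no CR3 interrupt, and
 * the DMA read position initialized to the end of the RX buffer.
 */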
1605 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1606 {
1607 	struct device_node *np = pdev->dev.of_node;
1608 	int id;
1609 
1610 	if (!np)
1611 		return NULL;
1612 
1613 	id = of_alias_get_id(np, "serial");
1614 	if (id < 0) {
1615 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1616 		return NULL;
1617 	}
1618 
1619 	if (WARN_ON(id >= STM32_MAX_PORTS))
1620 		return NULL;
1621 
1622 	stm32_ports[id].hw_flow_control =
1623 		of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ ||
1624 		of_property_read_bool (np, "uart-has-rtscts");
1625 	stm32_ports[id].port.line = id;
1626 	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1627 	stm32_ports[id].cr3_irq = 0;
1628 	stm32_ports[id].last_res = RX_BUF_L;
1629 	return &stm32_ports[id];
1630 }
1631 
1632 #ifdef CONFIG_OF
1633 static const struct of_device_id stm32_match[] = {
1634 	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
1635 	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
1636 	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
1637 	{},
1638 };
1639 
1640 MODULE_DEVICE_TABLE(of, stm32_match);
1641 #endif
1642 
1643 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
1644 					 struct platform_device *pdev)
1645 {
1646 	if (stm32port->rx_buf)
1647 		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
1648 				  stm32port->rx_dma_buf);
1649 }
1650 
1651 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
1652 				       struct platform_device *pdev)
1653 {
1654 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1655 	struct uart_port *port = &stm32port->port;
1656 	struct device *dev = &pdev->dev;
1657 	struct dma_slave_config config;
1658 	int ret;
1659 
1660 	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
1661 					       &stm32port->rx_dma_buf,
1662 					       GFP_KERNEL);
1663 	if (!stm32port->rx_buf)
1664 		return -ENOMEM;
1665 
1666 	/* Configure DMA channel */
1667 	memset(&config, 0, sizeof(config));
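	/* Read the USART receive data register one byte at a time */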
1668 	config.src_addr = port->mapbase + ofs->rdr;
1669 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1670 
1671 	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
1672 	if (ret < 0) {
1673 		dev_err(dev, "rx dma channel config failed\n");
1674 		stm32_usart_of_dma_rx_remove(stm32port, pdev);
1675 		return ret;
1676 	}
1677 
1678 	return 0;
1679 }
1680 
1681 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
1682 					 struct platform_device *pdev)
1683 {
1684 	if (stm32port->tx_buf)
1685 		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
1686 				  stm32port->tx_dma_buf);
1687 }
1688 
1689 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
1690 				       struct platform_device *pdev)
1691 {
1692 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1693 	struct uart_port *port = &stm32port->port;
1694 	struct device *dev = &pdev->dev;
1695 	struct dma_slave_config config;
1696 	int ret;
1697 
1698 	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
1699 					       &stm32port->tx_dma_buf,
1700 					       GFP_KERNEL);
1701 	if (!stm32port->tx_buf)
1702 		return -ENOMEM;
1703 
1704 	/* Configure DMA channel */
1705 	memset(&config, 0, sizeof(config));
1706 	config.dst_addr = port->mapbase + ofs->tdr;
1707 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1708 
1709 	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
1710 	if (ret < 0) {
1711 		dev_err(dev, "tx dma channel config failed\n");
1712 		stm32_usart_of_dma_tx_remove(stm32port, pdev);
1713 		return ret;
1714 	}
1715 
1716 	return 0;
1717 }
1718 
1719 static int stm32_usart_serial_probe(struct platform_device *pdev)
1720 {
1721 	struct stm32_port *stm32port;
1722 	int ret;
1723 
1724 	stm32port = stm32_usart_of_get_port(pdev);
1725 	if (!stm32port)
1726 		return -ENODEV;
1727 
1728 	stm32port->info = of_device_get_match_data(&pdev->dev);
1729 	if (!stm32port->info)
1730 		return -EINVAL;
1731 
1732 	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
1733 	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
1734 		return -EPROBE_DEFER;
1735 
1736 	/* Fall back to interrupt mode for any non-deferral error */
1737 	if (IS_ERR(stm32port->rx_ch))
1738 		stm32port->rx_ch = NULL;
1739 
1740 	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
1741 	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
1742 		ret = -EPROBE_DEFER;
1743 		goto err_dma_rx;
1744 	}
1745 	/* Fall back to interrupt mode for any non-deferral error */
1746 	if (IS_ERR(stm32port->tx_ch))
1747 		stm32port->tx_ch = NULL;
1748 
1749 	ret = stm32_usart_init_port(stm32port, pdev);
1750 	if (ret)
1751 		goto err_dma_tx;
1752 
1753 	if (stm32port->wakeup_src) {
1754 		device_set_wakeup_capable(&pdev->dev, true);
1755 		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
1756 		if (ret)
1757 			goto err_deinit_port;
1758 	}
1759 
1760 	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
1761 		/* Fall back to interrupt mode */
1762 		dma_release_channel(stm32port->rx_ch);
1763 		stm32port->rx_ch = NULL;
1764 	}
1765 
1766 	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
1767 		/* Fall back to interrupt mode */
1768 		dma_release_channel(stm32port->tx_ch);
1769 		stm32port->tx_ch = NULL;
1770 	}
1771 
1772 	if (!stm32port->rx_ch)
1773 		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
1774 	if (!stm32port->tx_ch)
1775 		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
1776 
1777 	platform_set_drvdata(pdev, &stm32port->port);
1778 
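	/*
	 * Mark the device active and hold a runtime PM usage reference while
	 * the port is registered; the reference is dropped by
	 * pm_runtime_put_sync() once uart_add_one_port() has succeeded.
	 */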
1779 	pm_runtime_get_noresume(&pdev->dev);
1780 	pm_runtime_set_active(&pdev->dev);
1781 	pm_runtime_enable(&pdev->dev);
1782 
1783 	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
1784 	if (ret)
1785 		goto err_port;
1786 
1787 	pm_runtime_put_sync(&pdev->dev);
1788 
1789 	return 0;
1790 
1791 err_port:
1792 	pm_runtime_disable(&pdev->dev);
1793 	pm_runtime_set_suspended(&pdev->dev);
1794 	pm_runtime_put_noidle(&pdev->dev);
1795 
1796 	if (stm32port->tx_ch)
1797 		stm32_usart_of_dma_tx_remove(stm32port, pdev);
1798 	if (stm32port->rx_ch)
1799 		stm32_usart_of_dma_rx_remove(stm32port, pdev);
1800 
1801 	if (stm32port->wakeup_src)
1802 		dev_pm_clear_wake_irq(&pdev->dev);
1803 
1804 err_deinit_port:
1805 	if (stm32port->wakeup_src)
1806 		device_set_wakeup_capable(&pdev->dev, false);
1807 
1808 	stm32_usart_deinit_port(stm32port);
1809 
1810 err_dma_tx:
1811 	if (stm32port->tx_ch)
1812 		dma_release_channel(stm32port->tx_ch);
1813 
1814 err_dma_rx:
1815 	if (stm32port->rx_ch)
1816 		dma_release_channel(stm32port->rx_ch);
1817 
1818 	return ret;
1819 }
1820 
1821 static int stm32_usart_serial_remove(struct platform_device *pdev)
1822 {
1823 	struct uart_port *port = platform_get_drvdata(pdev);
1824 	struct stm32_port *stm32_port = to_stm32_port(port);
1825 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1826 	u32 cr3;
1827 
1828 	pm_runtime_get_sync(&pdev->dev);
1829 	uart_remove_one_port(&stm32_usart_driver, port);
1830 
1831 	pm_runtime_disable(&pdev->dev);
1832 	pm_runtime_set_suspended(&pdev->dev);
1833 	pm_runtime_put_noidle(&pdev->dev);
1834 
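	/* Disable the parity error interrupt */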
1835 	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
1836 
1837 	if (stm32_port->tx_ch) {
1838 		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
1839 		dma_release_channel(stm32_port->tx_ch);
1840 	}
1841 
1842 	if (stm32_port->rx_ch) {
1843 		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
1844 		dma_release_channel(stm32_port->rx_ch);
1845 	}
1846 
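	/*
	 * Disable the error interrupt (EIE), the RX/TX DMA requests (DMAR,
	 * DMAT) and DMA-disable-on-reception-error (DDRE) in CR3.
	 */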
1847 	cr3 = readl_relaxed(port->membase + ofs->cr3);
1848 	cr3 &= ~USART_CR3_EIE;
1849 	cr3 &= ~USART_CR3_DMAR;
1850 	cr3 &= ~USART_CR3_DMAT;
1851 	cr3 &= ~USART_CR3_DDRE;
1852 	writel_relaxed(cr3, port->membase + ofs->cr3);
1853 
1854 	if (stm32_port->wakeup_src) {
1855 		dev_pm_clear_wake_irq(&pdev->dev);
1856 		device_init_wakeup(&pdev->dev, false);
1857 	}
1858 
1859 	stm32_usart_deinit_port(stm32_port);
1860 
1861 	return 0;
1862 }
1863 
1864 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1865 {
1866 	struct stm32_port *stm32_port = to_stm32_port(port);
1867 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1868 	u32 isr;
1869 	int ret;
1870 
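	/* Wait for TXE (transmit data register empty) before writing the character */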
1871 	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
1872 						(isr & USART_SR_TXE), 100,
1873 						STM32_USART_TIMEOUT_USEC);
1874 	if (ret != 0) {
1875 		dev_err(port->dev, "Error while sending data in UART TX: %d\n", ret);
1876 		return;
1877 	}
1878 	writel_relaxed(ch, port->membase + ofs->tdr);
1879 }
1880 
1881 #ifdef CONFIG_SERIAL_STM32_CONSOLE
1882 static void stm32_usart_console_write(struct console *co, const char *s,
1883 				      unsigned int cnt)
1884 {
1885 	struct uart_port *port = &stm32_ports[co->index].port;
1886 	struct stm32_port *stm32_port = to_stm32_port(port);
1887 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1888 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1889 	unsigned long flags;
1890 	u32 old_cr1, new_cr1;
1891 	int locked = 1;
1892 
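	/* In an oops, only try to take the port lock to avoid a deadlock */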
1893 	if (oops_in_progress)
1894 		locked = spin_trylock_irqsave(&port->lock, flags);
1895 	else
1896 		spin_lock_irqsave(&port->lock, flags);
1897 
1898 	/* Save and disable interrupts, enable the transmitter */
1899 	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
1900 	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
1901 	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
1902 	writel_relaxed(new_cr1, port->membase + ofs->cr1);
1903 
1904 	uart_console_write(port, s, cnt, stm32_usart_console_putchar);
1905 
1906 	/* Restore interrupt state */
1907 	writel_relaxed(old_cr1, port->membase + ofs->cr1);
1908 
1909 	if (locked)
1910 		spin_unlock_irqrestore(&port->lock, flags);
1911 }
1912 
1913 static int stm32_usart_console_setup(struct console *co, char *options)
1914 {
1915 	struct stm32_port *stm32port;
1916 	int baud = 9600;
1917 	int bits = 8;
1918 	int parity = 'n';
1919 	int flow = 'n';
1920 
1921 	if (co->index >= STM32_MAX_PORTS)
1922 		return -ENODEV;
1923 
1924 	stm32port = &stm32_ports[co->index];
1925 
1926 	/*
1927 	 * This driver does not support early console initialization
1928 	 * (use ARM early printk support instead), so we only expect
1929 	 * this to be called during the uart port registration when the
1930 	 * driver gets probed and the port should be mapped at that point.
1931 	 */
1932 	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
1933 		return -ENXIO;
1934 
1935 	if (options)
1936 		uart_parse_options(options, &baud, &parity, &bits, &flow);
1937 
1938 	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
1939 }
1940 
1941 static struct console stm32_console = {
1942 	.name		= STM32_SERIAL_NAME,
1943 	.device		= uart_console_device,
1944 	.write		= stm32_usart_console_write,
1945 	.setup		= stm32_usart_console_setup,
1946 	.flags		= CON_PRINTBUFFER,
1947 	.index		= -1,
1948 	.data		= &stm32_usart_driver,
1949 };
1950 
1951 #define STM32_SERIAL_CONSOLE (&stm32_console)
1952 
1953 #else
1954 #define STM32_SERIAL_CONSOLE NULL
1955 #endif /* CONFIG_SERIAL_STM32_CONSOLE */
1956 
1957 #ifdef CONFIG_SERIAL_EARLYCON
1958 static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1959 {
1960 	struct stm32_usart_info *info = port->private_data;
1961 
1962 	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
1963 		cpu_relax();
1964 
1965 	writel_relaxed(ch, port->membase + info->ofs.tdr);
1966 }
1967 
1968 static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
1969 {
1970 	struct earlycon_device *device = console->data;
1971 	struct uart_port *port = &device->port;
1972 
1973 	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
1974 }
1975 
1976 static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
1977 {
1978 	if (!(device->port.membase || device->port.iobase))
1979 		return -ENODEV;
1980 	device->port.private_data = &stm32h7_info;
1981 	device->con->write = early_stm32_serial_write;
1982 	return 0;
1983 }
1984 
1985 static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
1986 {
1987 	if (!(device->port.membase || device->port.iobase))
1988 		return -ENODEV;
1989 	device->port.private_data = &stm32f7_info;
1990 	device->con->write = early_stm32_serial_write;
1991 	return 0;
1992 }
1993 
1994 static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
1995 {
1996 	if (!(device->port.membase || device->port.iobase))
1997 		return -ENODEV;
1998 	device->port.private_data = &stm32f4_info;
1999 	device->con->write = early_stm32_serial_write;
2000 	return 0;
2001 }
2002 
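/*
 * Earlycon entries: the compatible string of the DT stdout device selects
 * the matching register layout before the full driver is probed.
 */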
2003 OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
2004 OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
2005 OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
2006 #endif /* CONFIG_SERIAL_EARLYCON */
2007 
2008 static struct uart_driver stm32_usart_driver = {
2009 	.driver_name	= DRIVER_NAME,
2010 	.dev_name	= STM32_SERIAL_NAME,
2011 	.major		= 0,
2012 	.minor		= 0,
2013 	.nr		= STM32_MAX_PORTS,
2014 	.cons		= STM32_SERIAL_CONSOLE,
2015 };
2016 
2017 static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
2018 						       bool enable)
2019 {
2020 	struct stm32_port *stm32_port = to_stm32_port(port);
2021 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
2022 	struct tty_port *tport = &port->state->port;
2023 	int ret;
2024 	unsigned int size = 0;
2025 	unsigned long flags;
2026 
2027 	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
2028 		return 0;
2029 
2030 	/*
2031 	 * Enable low-power wake-up and the wake-up irq if "enable" is true;
2032 	 * disable them otherwise.
2033 	 */
2034 	if (enable) {
2035 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
2036 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
2037 		mctrl_gpio_enable_irq_wake(stm32_port->gpios);
2038 
2039 		/*
2040 		 * When DMA is used for reception, it must be disabled before
2041 		 * entering low-power mode and re-enabled when exiting from
2042 		 * low-power mode.
2043 		 */
2044 		if (stm32_port->rx_ch) {
2045 			spin_lock_irqsave(&port->lock, flags);
2046 			/* Poll data from DMA RX buffer if any */
2047 			if (!stm32_usart_rx_dma_pause(stm32_port))
2048 				size += stm32_usart_receive_chars(port, true);
2049 			stm32_usart_rx_dma_terminate(stm32_port);
2050 			uart_unlock_and_check_sysrq_irqrestore(port, flags);
2051 			if (size)
2052 				tty_flip_buffer_push(tport);
2053 		}
2054 
2055 		/* Poll data from RX FIFO if any */
2056 		stm32_usart_receive_chars(port, false);
2057 	} else {
2058 		if (stm32_port->rx_ch) {
2059 			ret = stm32_usart_rx_dma_start_or_resume(port);
2060 			if (ret)
2061 				return ret;
2062 		}
2063 		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
2064 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
2065 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
2066 	}
2067 
2068 	return 0;
2069 }
2070 
2071 static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
2072 {
2073 	struct uart_port *port = dev_get_drvdata(dev);
2074 	int ret;
2075 
2076 	uart_suspend_port(&stm32_usart_driver, port);
2077 
2078 	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2079 		ret = stm32_usart_serial_en_wakeup(port, true);
2080 		if (ret)
2081 			return ret;
2082 	}
2083 
2084 	/*
2085 	 * When "no_console_suspend" is enabled, keep the pinctrl default state
2086 	 * and rely on the bootloader stage to restore it upon resume.
2087 	 * Otherwise, apply the idle or sleep states depending on wakeup
2088 	 * capabilities.
2089 	 */
2090 	if (console_suspend_enabled || !uart_console(port)) {
2091 		if (device_may_wakeup(dev) || device_wakeup_path(dev))
2092 			pinctrl_pm_select_idle_state(dev);
2093 		else
2094 			pinctrl_pm_select_sleep_state(dev);
2095 	}
2096 
2097 	return 0;
2098 }
2099 
2100 static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
2101 {
2102 	struct uart_port *port = dev_get_drvdata(dev);
2103 	int ret;
2104 
2105 	pinctrl_pm_select_default_state(dev);
2106 
2107 	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2108 		ret = stm32_usart_serial_en_wakeup(port, false);
2109 		if (ret)
2110 			return ret;
2111 	}
2112 
2113 	return uart_resume_port(&stm32_usart_driver, port);
2114 }
2115 
2116 static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
2117 {
2118 	struct uart_port *port = dev_get_drvdata(dev);
2119 	struct stm32_port *stm32port = container_of(port,
2120 			struct stm32_port, port);
2121 
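	/* Runtime PM only gates the USART kernel clock */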
2122 	clk_disable_unprepare(stm32port->clk);
2123 
2124 	return 0;
2125 }
2126 
2127 static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
2128 {
2129 	struct uart_port *port = dev_get_drvdata(dev);
2130 	struct stm32_port *stm32port = container_of(port,
2131 			struct stm32_port, port);
2132 
2133 	return clk_prepare_enable(stm32port->clk);
2134 }
2135 
2136 static const struct dev_pm_ops stm32_serial_pm_ops = {
2137 	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
2138 			   stm32_usart_runtime_resume, NULL)
2139 	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
2140 				stm32_usart_serial_resume)
2141 };
2142 
2143 static struct platform_driver stm32_serial_driver = {
2144 	.probe		= stm32_usart_serial_probe,
2145 	.remove		= stm32_usart_serial_remove,
2146 	.driver	= {
2147 		.name	= DRIVER_NAME,
2148 		.pm	= &stm32_serial_pm_ops,
2149 		.of_match_table = of_match_ptr(stm32_match),
2150 	},
2151 };
2152 
2153 static int __init stm32_usart_init(void)
2154 {
2155 	static char banner[] __initdata = "STM32 USART driver initialized";
2156 	int ret;
2157 
2158 	pr_info("%s\n", banner);
2159 
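	/*
	 * Register the uart_driver before the platform driver so that probe
	 * can call uart_add_one_port() against it.
	 */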
2160 	ret = uart_register_driver(&stm32_usart_driver);
2161 	if (ret)
2162 		return ret;
2163 
2164 	ret = platform_driver_register(&stm32_serial_driver);
2165 	if (ret)
2166 		uart_unregister_driver(&stm32_usart_driver);
2167 
2168 	return ret;
2169 }
2170 
2171 static void __exit stm32_usart_exit(void)
2172 {
2173 	platform_driver_unregister(&stm32_serial_driver);
2174 	uart_unregister_driver(&stm32_usart_driver);
2175 }
2176 
2177 module_init(stm32_usart_init);
2178 module_exit(stm32_usart_exit);
2179 
2180 MODULE_ALIAS("platform:" DRIVER_NAME);
2181 MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
2182 MODULE_LICENSE("GPL v2");
2183