1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Driver for Atmel AT91 Serial ports
4  *  Copyright (C) 2003 Rick Bronson
5  *
6  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8  *
9  *  DMA support added by Chip Coldwell.
10  */
11 #include <linux/circ_buf.h>
12 #include <linux/tty.h>
13 #include <linux/ioport.h>
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/serial.h>
17 #include <linux/clk.h>
18 #include <linux/clk-provider.h>
19 #include <linux/console.h>
20 #include <linux/sysrq.h>
21 #include <linux/tty_flip.h>
22 #include <linux/platform_device.h>
23 #include <linux/of.h>
24 #include <linux/of_device.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/dmaengine.h>
27 #include <linux/atmel_pdc.h>
28 #include <linux/uaccess.h>
29 #include <linux/platform_data/atmel.h>
30 #include <linux/timer.h>
31 #include <linux/err.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mm.h>
35 #include <linux/io.h>
36 
37 #include <asm/div64.h>
38 #include <asm/ioctls.h>
39 
40 #define PDC_BUFFER_SIZE		512
41 /* Revisit: We should calculate this based on the actual port settings */
42 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
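/*
 * Note: the receiver timeout register counts bit periods, so with roughly
 * 10 bits per character (start + 8 data + stop) the value 3 * 10 amounts
 * to about three idle character times before the TIMEOUT interrupt fires.
 */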
43 
44 /* The minimum number of data the FIFOs should be able to contain */
45 #define ATMEL_MIN_FIFO_SIZE	8
46 /*
47  * These two offsets are subtracted from the RX FIFO size to define the RTS
48  * high and low thresholds
49  */
50 #define ATMEL_RTS_HIGH_OFFSET	16
51 #define ATMEL_RTS_LOW_OFFSET	20
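/*
 * These offsets are later subtracted from the FIFO size when programming the
 * FIFO RTS thresholds; e.g. assuming a 32-byte RX FIFO, RTS would be
 * deasserted once the FIFO holds 32 - 16 = 16 bytes and asserted again when
 * it drains below 32 - 20 = 12 bytes.
 */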
52 
53 #include <linux/serial_core.h>
54 
55 #include "serial_mctrl_gpio.h"
56 #include "atmel_serial.h"
57 
58 static void atmel_start_rx(struct uart_port *port);
59 static void atmel_stop_rx(struct uart_port *port);
60 
61 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
62 
63 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
64  * wish to coexist with the 8250 driver, such as if we have an external 16C550
65  * UART. */
66 #define SERIAL_ATMEL_MAJOR	204
67 #define MINOR_START		154
68 #define ATMEL_DEVICENAME	"ttyAT"
69 
70 #else
71 
72 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
73  * name, but it is legally reserved for the 8250 driver. */
74 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
75 #define MINOR_START		64
76 #define ATMEL_DEVICENAME	"ttyS"
77 
78 #endif
79 
80 #define ATMEL_ISR_PASS_LIMIT	256
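/*
 * Upper bound on how many times the interrupt handler loops over the pending
 * status bits before returning, so that a stuck interrupt source cannot keep
 * the CPU inside the ISR indefinitely.
 */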
81 
82 struct atmel_dma_buffer {
83 	unsigned char	*buf;
84 	dma_addr_t	dma_addr;
85 	unsigned int	dma_size;
86 	unsigned int	ofs;
87 };
88 
89 struct atmel_uart_char {
90 	u16		status;
91 	u16		ch;
92 };
93 
94 /*
95  * Be careful, the real size of the ring buffer is
96  * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that the ring buffer
97  * can contain up to 1024 characters in PIO mode and up to 4096 characters in
98  * DMA mode.
99  */
100 #define ATMEL_SERIAL_RINGSIZE 1024
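/*
 * The ring indices are masked with (ATMEL_SERIAL_RINGSIZE - 1), so this must
 * stay a power of two. With 4 bytes per struct atmel_uart_char the backing
 * buffer is 1024 * 4 = 4096 bytes, which the DMA path reuses as a plain byte
 * buffer (hence the 4096 characters mentioned above).
 */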
101 
102 /*
103  * at91: 6 USARTs and one DBGU port (SAM9260)
104  * samx7: 3 USARTs and 5 UARTs
105  */
106 #define ATMEL_MAX_UART		8
107 
108 /*
109  * We wrap our port structure around the generic uart_port.
110  */
111 struct atmel_uart_port {
112 	struct uart_port	uart;		/* uart */
113 	struct clk		*clk;		/* uart clock */
114 	struct clk		*gclk;		/* uart generic clock */
115 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
116 	u32			backup_imr;	/* IMR saved during suspend */
117 	int			break_active;	/* break being received */
118 
119 	bool			use_dma_rx;	/* enable DMA receiver */
120 	bool			use_pdc_rx;	/* enable PDC receiver */
121 	short			pdc_rx_idx;	/* current PDC RX buffer */
122 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
123 
124 	bool			use_dma_tx;     /* enable DMA transmitter */
125 	bool			use_pdc_tx;	/* enable PDC transmitter */
126 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
127 
128 	spinlock_t			lock_tx;	/* port lock */
129 	spinlock_t			lock_rx;	/* port lock */
130 	struct dma_chan			*chan_tx;
131 	struct dma_chan			*chan_rx;
132 	struct dma_async_tx_descriptor	*desc_tx;
133 	struct dma_async_tx_descriptor	*desc_rx;
134 	dma_cookie_t			cookie_tx;
135 	dma_cookie_t			cookie_rx;
136 	struct scatterlist		sg_tx;
137 	struct scatterlist		sg_rx;
138 	struct tasklet_struct	tasklet_rx;
139 	struct tasklet_struct	tasklet_tx;
140 	atomic_t		tasklet_shutdown;
141 	unsigned int		irq_status_prev;
142 	unsigned int		tx_len;
143 
144 	struct circ_buf		rx_ring;
145 
146 	struct mctrl_gpios	*gpios;
147 	u32			backup_mode;	/* MR saved during iso7816 operations */
148 	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
149 	unsigned int		tx_done_mask;
150 	u32			fifo_size;
151 	u32			rts_high;
152 	u32			rts_low;
153 	bool			ms_irq_enabled;
154 	u32			rtor;	/* address of receiver timeout register if it exists */
155 	bool			is_usart;
156 	bool			has_frac_baudrate;
157 	bool			has_hw_timer;
158 	struct timer_list	uart_timer;
159 
160 	bool			tx_stopped;
161 	bool			suspended;
162 	unsigned int		pending;
163 	unsigned int		pending_status;
164 	spinlock_t		lock_suspended;
165 
166 	bool			hd_start_rx;	/* can start RX during half-duplex operation */
167 
168 	/* ISO7816 */
169 	unsigned int		fidi_min;
170 	unsigned int		fidi_max;
171 
172 	struct {
173 		u32		cr;
174 		u32		mr;
175 		u32		imr;
176 		u32		brgr;
177 		u32		rtor;
178 		u32		ttgr;
179 		u32		fmr;
180 		u32		fimr;
181 	} cache;
182 
183 	int (*prepare_rx)(struct uart_port *port);
184 	int (*prepare_tx)(struct uart_port *port);
185 	void (*schedule_rx)(struct uart_port *port);
186 	void (*schedule_tx)(struct uart_port *port);
187 	void (*release_rx)(struct uart_port *port);
188 	void (*release_tx)(struct uart_port *port);
189 };
190 
191 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
192 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
193 
194 #if defined(CONFIG_OF)
195 static const struct of_device_id atmel_serial_dt_ids[] = {
196 	{ .compatible = "atmel,at91rm9200-usart-serial" },
197 	{ /* sentinel */ }
198 };
199 #endif
200 
201 static inline struct atmel_uart_port *
202 to_atmel_uart_port(struct uart_port *uart)
203 {
204 	return container_of(uart, struct atmel_uart_port, uart);
205 }
206 
207 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
208 {
209 	return __raw_readl(port->membase + reg);
210 }
211 
212 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
213 {
214 	__raw_writel(value, port->membase + reg);
215 }
216 
217 static inline u8 atmel_uart_read_char(struct uart_port *port)
218 {
219 	return __raw_readb(port->membase + ATMEL_US_RHR);
220 }
221 
222 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
223 {
224 	__raw_writeb(value, port->membase + ATMEL_US_THR);
225 }
226 
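/*
 * Half-duplex here means either RS485 without receiving during transmission
 * or ISO7816 (a single bidirectional I/O line): the receiver is turned off
 * while transmitting and re-enabled once the transmitter has drained.
 */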
227 static inline int atmel_uart_is_half_duplex(struct uart_port *port)
228 {
229 	return ((port->rs485.flags & SER_RS485_ENABLED) &&
230 		!(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
231 		(port->iso7816.flags & SER_ISO7816_ENABLED);
232 }
233 
234 static inline int atmel_error_rate(int desired_value, int actual_value)
235 {
236 	return 100 - (desired_value * 100) / actual_value;
237 }
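
/*
 * Worked example (integer arithmetic): desired = 115200, actual = 117600
 * gives 100 - (115200 * 100) / 117600 = 100 - 97 = 3, i.e. roughly a 3%
 * deviation. A negative value means the actual rate is below the desired one.
 */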
238 
239 #ifdef CONFIG_SERIAL_ATMEL_PDC
240 static bool atmel_use_pdc_rx(struct uart_port *port)
241 {
242 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
243 
244 	return atmel_port->use_pdc_rx;
245 }
246 
247 static bool atmel_use_pdc_tx(struct uart_port *port)
248 {
249 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
250 
251 	return atmel_port->use_pdc_tx;
252 }
253 #else
254 static bool atmel_use_pdc_rx(struct uart_port *port)
255 {
256 	return false;
257 }
258 
259 static bool atmel_use_pdc_tx(struct uart_port *port)
260 {
261 	return false;
262 }
263 #endif
264 
265 static bool atmel_use_dma_tx(struct uart_port *port)
266 {
267 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
268 
269 	return atmel_port->use_dma_tx;
270 }
271 
272 static bool atmel_use_dma_rx(struct uart_port *port)
273 {
274 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
275 
276 	return atmel_port->use_dma_rx;
277 }
278 
279 static bool atmel_use_fifo(struct uart_port *port)
280 {
281 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
282 
283 	return atmel_port->fifo_size;
284 }
285 
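/*
 * Only schedule a tasklet while the port is live; once tasklet_shutdown is
 * set (presumably during port shutdown) no new work may be queued.
 */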
286 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
287 				   struct tasklet_struct *t)
288 {
289 	if (!atomic_read(&atmel_port->tasklet_shutdown))
290 		tasklet_schedule(t);
291 }
292 
293 /* Enable or disable the rs485 support */
294 static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
295 			      struct serial_rs485 *rs485conf)
296 {
297 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
298 	unsigned int mode;
299 
300 	/* Disable interrupts */
301 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
302 
303 	mode = atmel_uart_readl(port, ATMEL_US_MR);
304 
305 	if (rs485conf->flags & SER_RS485_ENABLED) {
306 		dev_dbg(port->dev, "Setting UART to RS485\n");
307 		if (rs485conf->flags & SER_RS485_RX_DURING_TX)
308 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
309 		else
310 			atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
311 
312 		atmel_uart_writel(port, ATMEL_US_TTGR,
313 				  rs485conf->delay_rts_after_send);
314 		mode &= ~ATMEL_US_USMODE;
315 		mode |= ATMEL_US_USMODE_RS485;
316 	} else {
317 		dev_dbg(port->dev, "Setting UART to RS232\n");
318 		if (atmel_use_pdc_tx(port))
319 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
320 				ATMEL_US_TXBUFE;
321 		else
322 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
323 	}
324 	atmel_uart_writel(port, ATMEL_US_MR, mode);
325 
326 	/* Enable interrupts */
327 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
328 
329 	return 0;
330 }
331 
332 static unsigned int atmel_calc_cd(struct uart_port *port,
333 				  struct serial_iso7816 *iso7816conf)
334 {
335 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
336 	unsigned int cd;
337 	u64 mck_rate;
338 
339 	mck_rate = (u64)clk_get_rate(atmel_port->clk);
340 	do_div(mck_rate, iso7816conf->clk);
341 	cd = mck_rate;
342 	return cd;
343 }
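
/*
 * CD above is the divider written to BRGR: the peripheral clock divided by
 * the ISO7816 clock requested by userspace. For instance, assuming a 132 MHz
 * peripheral clock and a 3.5712 MHz card clock, CD = 36 (truncated).
 */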
344 
345 static unsigned int atmel_calc_fidi(struct uart_port *port,
346 				    struct serial_iso7816 *iso7816conf)
347 {
348 	u64 fidi = 0;
349 
350 	if (iso7816conf->sc_fi && iso7816conf->sc_di) {
351 		fidi = (u64)iso7816conf->sc_fi;
352 		do_div(fidi, iso7816conf->sc_di);
353 	}
354 	return (u32)fidi;
355 }
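
/*
 * FIDI is the ratio of the ISO7816 clock-rate-conversion factor Fi to the
 * baud-rate-adjustment factor Di. The ATR defaults Fi = 372, Di = 1 give
 * FIDI = 372 (0x174), which matches the reset value restored when leaving
 * ISO7816 mode below.
 */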
356 
357 /* Enable or disable the iso7816 support */
358 /* Called with interrupts disabled */
359 static int atmel_config_iso7816(struct uart_port *port,
360 				struct serial_iso7816 *iso7816conf)
361 {
362 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
363 	unsigned int mode;
364 	unsigned int cd, fidi;
365 	int ret = 0;
366 
367 	/* Disable interrupts */
368 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
369 
370 	mode = atmel_uart_readl(port, ATMEL_US_MR);
371 
372 	if (iso7816conf->flags & SER_ISO7816_ENABLED) {
373 		mode &= ~ATMEL_US_USMODE;
374 
375 		if (iso7816conf->tg > 255) {
376 			dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n");
377 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
378 			ret = -EINVAL;
379 			goto err_out;
380 		}
381 
382 		if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
383 		    == SER_ISO7816_T(0)) {
384 			mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK;
385 		} else if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
386 			   == SER_ISO7816_T(1)) {
387 			mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK;
388 		} else {
389 			dev_err(port->dev, "ISO7816: Type not supported\n");
390 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
391 			ret = -EINVAL;
392 			goto err_out;
393 		}
394 
395 		mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR);
396 
397 		/* select mck clock, and output it on the SCK pin */
398 		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
399 		/* set parity for normal/inverse mode + max iterations */
400 		mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3);
401 
402 		cd = atmel_calc_cd(port, iso7816conf);
403 		fidi = atmel_calc_fidi(port, iso7816conf);
404 		if (fidi == 0) {
405 			dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n");
406 		} else if (fidi < atmel_port->fidi_min
407 			   || fidi > atmel_port->fidi_max) {
408 			dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi);
409 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
410 			ret = -EINVAL;
411 			goto err_out;
412 		}
413 
414 		if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) {
415 			/* port not yet in iso7816 mode: store configuration */
416 			atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR);
417 			atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
418 		}
419 
420 		atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg);
421 		atmel_uart_writel(port, ATMEL_US_BRGR, cd);
422 		atmel_uart_writel(port, ATMEL_US_FIDI, fidi);
423 
424 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN);
425 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION;
426 	} else {
427 		dev_dbg(port->dev, "Setting UART back to RS232\n");
428 		/* back to last RS232 settings */
429 		mode = atmel_port->backup_mode;
430 		memset(iso7816conf, 0, sizeof(struct serial_iso7816));
431 		atmel_uart_writel(port, ATMEL_US_TTGR, 0);
432 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr);
433 		atmel_uart_writel(port, ATMEL_US_FIDI, 0x174);
434 
435 		if (atmel_use_pdc_tx(port))
436 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
437 						   ATMEL_US_TXBUFE;
438 		else
439 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
440 	}
441 
442 	port->iso7816 = *iso7816conf;
443 
444 	atmel_uart_writel(port, ATMEL_US_MR, mode);
445 
446 err_out:
447 	/* Enable interrupts */
448 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
449 
450 	return ret;
451 }
452 
453 /*
454  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
455  */
456 static u_int atmel_tx_empty(struct uart_port *port)
457 {
458 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
459 
460 	if (atmel_port->tx_stopped)
461 		return TIOCSER_TEMT;
462 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
463 		TIOCSER_TEMT :
464 		0;
465 }
466 
467 /*
468  * Set state of the modem control output lines
469  */
470 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
471 {
472 	unsigned int control = 0;
473 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
474 	unsigned int rts_paused, rts_ready;
475 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
476 
477 	/* override mode to RS485 if needed, otherwise keep the current mode */
478 	if (port->rs485.flags & SER_RS485_ENABLED) {
479 		atmel_uart_writel(port, ATMEL_US_TTGR,
480 				  port->rs485.delay_rts_after_send);
481 		mode &= ~ATMEL_US_USMODE;
482 		mode |= ATMEL_US_USMODE_RS485;
483 	}
484 
485 	/* set the RTS line state according to the mode */
486 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
487 		/* force RTS line to high level */
488 		rts_paused = ATMEL_US_RTSEN;
489 
490 		/* give the control of the RTS line back to the hardware */
491 		rts_ready = ATMEL_US_RTSDIS;
492 	} else {
493 		/* force RTS line to high level */
494 		rts_paused = ATMEL_US_RTSDIS;
495 
496 		/* force RTS line to low level */
497 		rts_ready = ATMEL_US_RTSEN;
498 	}
499 
500 	if (mctrl & TIOCM_RTS)
501 		control |= rts_ready;
502 	else
503 		control |= rts_paused;
504 
505 	if (mctrl & TIOCM_DTR)
506 		control |= ATMEL_US_DTREN;
507 	else
508 		control |= ATMEL_US_DTRDIS;
509 
510 	atmel_uart_writel(port, ATMEL_US_CR, control);
511 
512 	mctrl_gpio_set(atmel_port->gpios, mctrl);
513 
514 	/* Local loopback mode? */
515 	mode &= ~ATMEL_US_CHMODE;
516 	if (mctrl & TIOCM_LOOP)
517 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
518 	else
519 		mode |= ATMEL_US_CHMODE_NORMAL;
520 
521 	atmel_uart_writel(port, ATMEL_US_MR, mode);
522 }
523 
524 /*
525  * Get state of the modem control input lines
526  */
527 static u_int atmel_get_mctrl(struct uart_port *port)
528 {
529 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
530 	unsigned int ret = 0, status;
531 
532 	status = atmel_uart_readl(port, ATMEL_US_CSR);
533 
534 	/*
535 	 * The control signals are active low.
536 	 */
537 	if (!(status & ATMEL_US_DCD))
538 		ret |= TIOCM_CD;
539 	if (!(status & ATMEL_US_CTS))
540 		ret |= TIOCM_CTS;
541 	if (!(status & ATMEL_US_DSR))
542 		ret |= TIOCM_DSR;
543 	if (!(status & ATMEL_US_RI))
544 		ret |= TIOCM_RI;
545 
546 	return mctrl_gpio_get(atmel_port->gpios, &ret);
547 }
548 
549 /*
550  * Stop transmitting.
551  */
552 static void atmel_stop_tx(struct uart_port *port)
553 {
554 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
555 
556 	if (atmel_use_pdc_tx(port)) {
557 		/* disable PDC transmit */
558 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
559 	}
560 
561 	/*
562 	 * Disable the transmitter.
563 	 * This is mandatory when DMA is used, otherwise the remaining DMA
564 	 * buffer would still be transmitted despite the stop request.
565 	 */
566 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
567 	atmel_port->tx_stopped = true;
568 
569 	/* Disable interrupts */
570 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
571 
572 	if (atmel_uart_is_half_duplex(port))
573 		if (!atomic_read(&atmel_port->tasklet_shutdown))
574 			atmel_start_rx(port);
575 
576 }
577 
578 /*
579  * Start transmitting.
580  */
581 static void atmel_start_tx(struct uart_port *port)
582 {
583 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
584 
585 	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
586 				       & ATMEL_PDC_TXTEN))
587 		/* The transmitter is already running.  Yes, we
588 		 * really need this. */
589 		return;
590 
591 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
592 		if (atmel_uart_is_half_duplex(port))
593 			atmel_stop_rx(port);
594 
595 	if (atmel_use_pdc_tx(port))
596 		/* re-enable PDC transmit */
597 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
598 
599 	/* Enable interrupts */
600 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
601 
602 	/* re-enable the transmitter */
603 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
604 	atmel_port->tx_stopped = false;
605 }
606 
607 /*
608  * start receiving - port is in process of being opened.
609  */
610 static void atmel_start_rx(struct uart_port *port)
611 {
612 	/* reset status and receiver */
613 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
614 
615 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
616 
617 	if (atmel_use_pdc_rx(port)) {
618 		/* enable PDC controller */
619 		atmel_uart_writel(port, ATMEL_US_IER,
620 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
621 				  port->read_status_mask);
622 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
623 	} else {
624 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
625 	}
626 }
627 
628 /*
629  * Stop receiving - port is in process of being closed.
630  */
631 static void atmel_stop_rx(struct uart_port *port)
632 {
633 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
634 
635 	if (atmel_use_pdc_rx(port)) {
636 		/* disable PDC receive */
637 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
638 		atmel_uart_writel(port, ATMEL_US_IDR,
639 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
640 				  port->read_status_mask);
641 	} else {
642 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
643 	}
644 }
645 
646 /*
647  * Enable modem status interrupts
648  */
649 static void atmel_enable_ms(struct uart_port *port)
650 {
651 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
652 	uint32_t ier = 0;
653 
654 	/*
655 	 * Interrupt should not be enabled twice
656 	 */
657 	if (atmel_port->ms_irq_enabled)
658 		return;
659 
660 	atmel_port->ms_irq_enabled = true;
661 
662 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
663 		ier |= ATMEL_US_CTSIC;
664 
665 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
666 		ier |= ATMEL_US_DSRIC;
667 
668 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
669 		ier |= ATMEL_US_RIIC;
670 
671 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
672 		ier |= ATMEL_US_DCDIC;
673 
674 	atmel_uart_writel(port, ATMEL_US_IER, ier);
675 
676 	mctrl_gpio_enable_ms(atmel_port->gpios);
677 }
678 
679 /*
680  * Disable modem status interrupts
681  */
682 static void atmel_disable_ms(struct uart_port *port)
683 {
684 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
685 	uint32_t idr = 0;
686 
687 	/*
688 	 * Interrupt should not be disabled twice
689 	 */
690 	if (!atmel_port->ms_irq_enabled)
691 		return;
692 
693 	atmel_port->ms_irq_enabled = false;
694 
695 	mctrl_gpio_disable_ms(atmel_port->gpios);
696 
697 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
698 		idr |= ATMEL_US_CTSIC;
699 
700 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
701 		idr |= ATMEL_US_DSRIC;
702 
703 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
704 		idr |= ATMEL_US_RIIC;
705 
706 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
707 		idr |= ATMEL_US_DCDIC;
708 
709 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
710 }
711 
712 /*
713  * Control the transmission of a break signal
714  */
715 static void atmel_break_ctl(struct uart_port *port, int break_state)
716 {
717 	if (break_state != 0)
718 		/* start break */
719 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
720 	else
721 		/* stop break */
722 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
723 }
724 
725 /*
726  * Stores the incoming character in the ring buffer
727  */
728 static void
729 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
730 		     unsigned int ch)
731 {
732 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
733 	struct circ_buf *ring = &atmel_port->rx_ring;
734 	struct atmel_uart_char *c;
735 
736 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
737 		/* Buffer overflow, ignore char */
738 		return;
739 
740 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
741 	c->status	= status;
742 	c->ch		= ch;
743 
744 	/* Make sure the character is stored before we update head. */
745 	smp_wmb();
746 
747 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
748 }
749 
750 /*
751  * Deal with parity, framing and overrun errors.
752  */
753 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
754 {
755 	/* clear error */
756 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
757 
758 	if (status & ATMEL_US_RXBRK) {
759 		/* ignore side-effect */
760 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
761 		port->icount.brk++;
762 	}
763 	if (status & ATMEL_US_PARE)
764 		port->icount.parity++;
765 	if (status & ATMEL_US_FRAME)
766 		port->icount.frame++;
767 	if (status & ATMEL_US_OVRE)
768 		port->icount.overrun++;
769 }
770 
771 /*
772  * Characters received (called from interrupt handler)
773  */
774 static void atmel_rx_chars(struct uart_port *port)
775 {
776 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
777 	unsigned int status, ch;
778 
779 	status = atmel_uart_readl(port, ATMEL_US_CSR);
780 	while (status & ATMEL_US_RXRDY) {
781 		ch = atmel_uart_read_char(port);
782 
783 		/*
784 		 * note that the error handling code is
785 		 * out of the main execution path
786 		 */
787 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
788 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
789 			     || atmel_port->break_active)) {
790 
791 			/* clear error */
792 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
793 
794 			if (status & ATMEL_US_RXBRK
795 			    && !atmel_port->break_active) {
796 				atmel_port->break_active = 1;
797 				atmel_uart_writel(port, ATMEL_US_IER,
798 						  ATMEL_US_RXBRK);
799 			} else {
800 				/*
801 				 * This is either the end-of-break
802 				 * condition or we've received at
803 				 * least one character without RXBRK
804 				 * being set. In both cases, the next
805 				 * RXBRK will indicate start-of-break.
806 				 */
807 				atmel_uart_writel(port, ATMEL_US_IDR,
808 						  ATMEL_US_RXBRK);
809 				status &= ~ATMEL_US_RXBRK;
810 				atmel_port->break_active = 0;
811 			}
812 		}
813 
814 		atmel_buffer_rx_char(port, status, ch);
815 		status = atmel_uart_readl(port, ATMEL_US_CSR);
816 	}
817 
818 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
819 }
820 
821 /*
822  * Transmit characters (called from tasklet with TXRDY interrupt
823  * disabled)
824  */
825 static void atmel_tx_chars(struct uart_port *port)
826 {
827 	struct circ_buf *xmit = &port->state->xmit;
828 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
829 
830 	if (port->x_char &&
831 	    (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
832 		atmel_uart_write_char(port, port->x_char);
833 		port->icount.tx++;
834 		port->x_char = 0;
835 	}
836 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
837 		return;
838 
839 	while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
840 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
841 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
842 		port->icount.tx++;
843 		if (uart_circ_empty(xmit))
844 			break;
845 	}
846 
847 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
848 		uart_write_wakeup(port);
849 
850 	if (!uart_circ_empty(xmit)) {
851 		/* we still have characters to transmit, so we should continue
852 		 * transmitting them when TX is ready, regardless of the
853 		 * mode or duplex setting
854 		 */
855 		atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
856 
857 		/* Enable interrupts */
858 		atmel_uart_writel(port, ATMEL_US_IER,
859 				  atmel_port->tx_done_mask);
860 	} else {
861 		if (atmel_uart_is_half_duplex(port))
862 			atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
863 	}
864 }
865 
866 static void atmel_complete_tx_dma(void *arg)
867 {
868 	struct atmel_uart_port *atmel_port = arg;
869 	struct uart_port *port = &atmel_port->uart;
870 	struct circ_buf *xmit = &port->state->xmit;
871 	struct dma_chan *chan = atmel_port->chan_tx;
872 	unsigned long flags;
873 
874 	spin_lock_irqsave(&port->lock, flags);
875 
876 	if (chan)
877 		dmaengine_terminate_all(chan);
878 	xmit->tail += atmel_port->tx_len;
879 	xmit->tail &= UART_XMIT_SIZE - 1;
880 
881 	port->icount.tx += atmel_port->tx_len;
882 
883 	spin_lock_irq(&atmel_port->lock_tx);
884 	async_tx_ack(atmel_port->desc_tx);
885 	atmel_port->cookie_tx = -EINVAL;
886 	atmel_port->desc_tx = NULL;
887 	spin_unlock_irq(&atmel_port->lock_tx);
888 
889 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
890 		uart_write_wakeup(port);
891 
892 	/*
893 	 * xmit is a circular buffer, so if we have just sent data from
894 	 * xmit->tail to the end of xmit->buf, now we have to transmit the
895 	 * remaining data from the beginning of xmit->buf to xmit->head.
896 	 */
897 	if (!uart_circ_empty(xmit))
898 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
899 	else if (atmel_uart_is_half_duplex(port)) {
900 		/*
901 		 * DMA done, re-enable TXEMPTY and signal that we can stop
902 		 * TX and start RX for RS485
903 		 */
904 		atmel_port->hd_start_rx = true;
905 		atmel_uart_writel(port, ATMEL_US_IER,
906 				  atmel_port->tx_done_mask);
907 	}
908 
909 	spin_unlock_irqrestore(&port->lock, flags);
910 }
911 
912 static void atmel_release_tx_dma(struct uart_port *port)
913 {
914 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
915 	struct dma_chan *chan = atmel_port->chan_tx;
916 
917 	if (chan) {
918 		dmaengine_terminate_all(chan);
919 		dma_release_channel(chan);
920 		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
921 				DMA_TO_DEVICE);
922 	}
923 
924 	atmel_port->desc_tx = NULL;
925 	atmel_port->chan_tx = NULL;
926 	atmel_port->cookie_tx = -EINVAL;
927 }
928 
929 /*
930  * Called from tasklet with the TXRDY interrupt disabled.
931  */
932 static void atmel_tx_dma(struct uart_port *port)
933 {
934 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
935 	struct circ_buf *xmit = &port->state->xmit;
936 	struct dma_chan *chan = atmel_port->chan_tx;
937 	struct dma_async_tx_descriptor *desc;
938 	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
939 	unsigned int tx_len, part1_len, part2_len, sg_len;
940 	dma_addr_t phys_addr;
941 
942 	/* Make sure we have an idle channel */
943 	if (atmel_port->desc_tx != NULL)
944 		return;
945 
946 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
947 		/*
948 		 * DMA is idle now.
949 		 * Port xmit buffer is already mapped,
950 		 * and it is one page... Just adjust
951 		 * offsets and lengths. Since it is a circular buffer,
952 		 * we have to transmit till the end, and then the rest.
953 		 * Take the port lock to get a
954 		 * consistent xmit buffer state.
955 		 */
956 		tx_len = CIRC_CNT_TO_END(xmit->head,
957 					 xmit->tail,
958 					 UART_XMIT_SIZE);
959 
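		/*
		 * With a FIFO, THR accepts multi-data (4-byte) accesses, so
		 * split the transfer into a DWORD-aligned bulk part plus a
		 * 0-3 byte remainder; legacy ports only take byte accesses.
		 */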
960 		if (atmel_port->fifo_size) {
961 			/* multi data mode */
962 			part1_len = (tx_len & ~0x3); /* DWORD access */
963 			part2_len = (tx_len & 0x3); /* BYTE access */
964 		} else {
965 			/* single data (legacy) mode */
966 			part1_len = 0;
967 			part2_len = tx_len; /* BYTE access only */
968 		}
969 
970 		sg_init_table(sgl, 2);
971 		sg_len = 0;
972 		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
973 		if (part1_len) {
974 			sg = &sgl[sg_len++];
975 			sg_dma_address(sg) = phys_addr;
976 			sg_dma_len(sg) = part1_len;
977 
978 			phys_addr += part1_len;
979 		}
980 
981 		if (part2_len) {
982 			sg = &sgl[sg_len++];
983 			sg_dma_address(sg) = phys_addr;
984 			sg_dma_len(sg) = part2_len;
985 		}
986 
987 		/*
988 		 * save tx_len so atmel_complete_tx_dma() will increase
989 		 * xmit->tail correctly
990 		 */
991 		atmel_port->tx_len = tx_len;
992 
993 		desc = dmaengine_prep_slave_sg(chan,
994 					       sgl,
995 					       sg_len,
996 					       DMA_MEM_TO_DEV,
997 					       DMA_PREP_INTERRUPT |
998 					       DMA_CTRL_ACK);
999 		if (!desc) {
1000 			dev_err(port->dev, "Failed to send via dma!\n");
1001 			return;
1002 		}
1003 
1004 		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
1005 
1006 		atmel_port->desc_tx = desc;
1007 		desc->callback = atmel_complete_tx_dma;
1008 		desc->callback_param = atmel_port;
1009 		atmel_port->cookie_tx = dmaengine_submit(desc);
1010 		if (dma_submit_error(atmel_port->cookie_tx)) {
1011 			dev_err(port->dev, "dma_submit_error %d\n",
1012 				atmel_port->cookie_tx);
1013 			return;
1014 		}
1015 
1016 		dma_async_issue_pending(chan);
1017 	}
1018 
1019 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1020 		uart_write_wakeup(port);
1021 }
1022 
1023 static int atmel_prepare_tx_dma(struct uart_port *port)
1024 {
1025 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1026 	struct device *mfd_dev = port->dev->parent;
1027 	dma_cap_mask_t		mask;
1028 	struct dma_slave_config config;
1029 	int ret, nent;
1030 
1031 	dma_cap_zero(mask);
1032 	dma_cap_set(DMA_SLAVE, mask);
1033 
1034 	atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
1035 	if (atmel_port->chan_tx == NULL)
1036 		goto chan_err;
1037 	dev_info(port->dev, "using %s for tx DMA transfers\n",
1038 		dma_chan_name(atmel_port->chan_tx));
1039 
1040 	spin_lock_init(&atmel_port->lock_tx);
1041 	sg_init_table(&atmel_port->sg_tx, 1);
1042 	/* UART circular tx buffer is an aligned page. */
1043 	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
1044 	sg_set_page(&atmel_port->sg_tx,
1045 			virt_to_page(port->state->xmit.buf),
1046 			UART_XMIT_SIZE,
1047 			offset_in_page(port->state->xmit.buf));
1048 	nent = dma_map_sg(port->dev,
1049 				&atmel_port->sg_tx,
1050 				1,
1051 				DMA_TO_DEVICE);
1052 
1053 	if (!nent) {
1054 		dev_dbg(port->dev, "need to release resource of dma\n");
1055 		goto chan_err;
1056 	} else {
1057 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1058 			sg_dma_len(&atmel_port->sg_tx),
1059 			port->state->xmit.buf,
1060 			&sg_dma_address(&atmel_port->sg_tx));
1061 	}
1062 
1063 	/* Configure the slave DMA */
1064 	memset(&config, 0, sizeof(config));
1065 	config.direction = DMA_MEM_TO_DEV;
1066 	config.dst_addr_width = (atmel_port->fifo_size) ?
1067 				DMA_SLAVE_BUSWIDTH_4_BYTES :
1068 				DMA_SLAVE_BUSWIDTH_1_BYTE;
1069 	config.dst_addr = port->mapbase + ATMEL_US_THR;
1070 	config.dst_maxburst = 1;
1071 
1072 	ret = dmaengine_slave_config(atmel_port->chan_tx,
1073 				     &config);
1074 	if (ret) {
1075 		dev_err(port->dev, "DMA tx slave configuration failed\n");
1076 		goto chan_err;
1077 	}
1078 
1079 	return 0;
1080 
1081 chan_err:
1082 	dev_err(port->dev, "TX channel not available, switch to pio\n");
1083 	atmel_port->use_dma_tx = false;
1084 	if (atmel_port->chan_tx)
1085 		atmel_release_tx_dma(port);
1086 	return -EINVAL;
1087 }
1088 
1089 static void atmel_complete_rx_dma(void *arg)
1090 {
1091 	struct uart_port *port = arg;
1092 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1093 
1094 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1095 }
1096 
1097 static void atmel_release_rx_dma(struct uart_port *port)
1098 {
1099 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1100 	struct dma_chan *chan = atmel_port->chan_rx;
1101 
1102 	if (chan) {
1103 		dmaengine_terminate_all(chan);
1104 		dma_release_channel(chan);
1105 		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
1106 				DMA_FROM_DEVICE);
1107 	}
1108 
1109 	atmel_port->desc_rx = NULL;
1110 	atmel_port->chan_rx = NULL;
1111 	atmel_port->cookie_rx = -EINVAL;
1112 }
1113 
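/*
 * Drain the cyclic RX DMA buffer into the tty layer. The DMA residue tells
 * how far the hardware has written (ring->head), while ring->tail tracks what
 * has already been pushed, wrapping around the end of the buffer as needed.
 */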
1114 static void atmel_rx_from_dma(struct uart_port *port)
1115 {
1116 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1117 	struct tty_port *tport = &port->state->port;
1118 	struct circ_buf *ring = &atmel_port->rx_ring;
1119 	struct dma_chan *chan = atmel_port->chan_rx;
1120 	struct dma_tx_state state;
1121 	enum dma_status dmastat;
1122 	size_t count;
1123 
1124 
1125 	/* Reset the UART timeout early so that we don't miss one */
1126 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1127 	dmastat = dmaengine_tx_status(chan,
1128 				atmel_port->cookie_rx,
1129 				&state);
1130 	/* Reschedule the tasklet if the DMA status reports an error */
1131 	if (dmastat == DMA_ERROR) {
1132 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1133 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1134 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1135 		return;
1136 	}
1137 
1138 	/* CPU claims ownership of RX DMA buffer */
1139 	dma_sync_sg_for_cpu(port->dev,
1140 			    &atmel_port->sg_rx,
1141 			    1,
1142 			    DMA_FROM_DEVICE);
1143 
1144 	/*
1145 	 * ring->head points to the end of data already written by the DMA.
1146 	 * ring->tail points to the beginning of data to be read by the
1147 	 * framework.
1148 	 * The current transfer size should not be larger than the dma buffer
1149 	 * length.
1150 	 */
1151 	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1152 	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1153 	/*
1154 	 * At this point ring->head may point to the first byte right after the
1155 	 * last byte of the dma buffer:
1156 	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1157 	 *
1158 	 * However ring->tail must always point inside the dma buffer:
1159 	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1160 	 *
1161 	 * Since we use a ring buffer, we have to handle the case
1162 	 * where head is lower than tail. In such a case, we first read from
1163 	 * tail to the end of the buffer then reset tail.
1164 	 */
1165 	if (ring->head < ring->tail) {
1166 		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1167 
1168 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1169 		ring->tail = 0;
1170 		port->icount.rx += count;
1171 	}
1172 
1173 	/* Finally we read data from tail to head */
1174 	if (ring->tail < ring->head) {
1175 		count = ring->head - ring->tail;
1176 
1177 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1178 		/* Wrap ring->head if needed */
1179 		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1180 			ring->head = 0;
1181 		ring->tail = ring->head;
1182 		port->icount.rx += count;
1183 	}
1184 
1185 	/* USART retrieves ownership of RX DMA buffer */
1186 	dma_sync_sg_for_device(port->dev,
1187 			       &atmel_port->sg_rx,
1188 			       1,
1189 			       DMA_FROM_DEVICE);
1190 
1191 	tty_flip_buffer_push(tport);
1192 
1193 	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1194 }
1195 
1196 static int atmel_prepare_rx_dma(struct uart_port *port)
1197 {
1198 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1199 	struct device *mfd_dev = port->dev->parent;
1200 	struct dma_async_tx_descriptor *desc;
1201 	dma_cap_mask_t		mask;
1202 	struct dma_slave_config config;
1203 	struct circ_buf		*ring;
1204 	int ret, nent;
1205 
1206 	ring = &atmel_port->rx_ring;
1207 
1208 	dma_cap_zero(mask);
1209 	dma_cap_set(DMA_CYCLIC, mask);
1210 
1211 	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
1212 	if (atmel_port->chan_rx == NULL)
1213 		goto chan_err;
1214 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1215 		dma_chan_name(atmel_port->chan_rx));
1216 
1217 	spin_lock_init(&atmel_port->lock_rx);
1218 	sg_init_table(&atmel_port->sg_rx, 1);
1219 	/* UART circular rx buffer is an aligned page. */
1220 	BUG_ON(!PAGE_ALIGNED(ring->buf));
1221 	sg_set_page(&atmel_port->sg_rx,
1222 		    virt_to_page(ring->buf),
1223 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1224 		    offset_in_page(ring->buf));
1225 	nent = dma_map_sg(port->dev,
1226 			  &atmel_port->sg_rx,
1227 			  1,
1228 			  DMA_FROM_DEVICE);
1229 
1230 	if (!nent) {
1231 		dev_dbg(port->dev, "need to release resource of dma\n");
1232 		goto chan_err;
1233 	} else {
1234 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1235 			sg_dma_len(&atmel_port->sg_rx),
1236 			ring->buf,
1237 			&sg_dma_address(&atmel_port->sg_rx));
1238 	}
1239 
1240 	/* Configure the slave DMA */
1241 	memset(&config, 0, sizeof(config));
1242 	config.direction = DMA_DEV_TO_MEM;
1243 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1244 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1245 	config.src_maxburst = 1;
1246 
1247 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1248 				     &config);
1249 	if (ret) {
1250 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1251 		goto chan_err;
1252 	}
1253 	/*
1254 	 * Prepare a cyclic dma transfer with a period of half the ring
1255 	 * buffer size, so the completion callback fires twice per wrap
1256 	 */
1257 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1258 					 sg_dma_address(&atmel_port->sg_rx),
1259 					 sg_dma_len(&atmel_port->sg_rx),
1260 					 sg_dma_len(&atmel_port->sg_rx)/2,
1261 					 DMA_DEV_TO_MEM,
1262 					 DMA_PREP_INTERRUPT);
1263 	if (!desc) {
1264 		dev_err(port->dev, "Preparing DMA cyclic failed\n");
1265 		goto chan_err;
1266 	}
1267 	desc->callback = atmel_complete_rx_dma;
1268 	desc->callback_param = port;
1269 	atmel_port->desc_rx = desc;
1270 	atmel_port->cookie_rx = dmaengine_submit(desc);
1271 	if (dma_submit_error(atmel_port->cookie_rx)) {
1272 		dev_err(port->dev, "dma_submit_error %d\n",
1273 			atmel_port->cookie_rx);
1274 		goto chan_err;
1275 	}
1276 
1277 	dma_async_issue_pending(atmel_port->chan_rx);
1278 
1279 	return 0;
1280 
1281 chan_err:
1282 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1283 	atmel_port->use_dma_rx = false;
1284 	if (atmel_port->chan_rx)
1285 		atmel_release_rx_dma(port);
1286 	return -EINVAL;
1287 }
1288 
1289 static void atmel_uart_timer_callback(struct timer_list *t)
1290 {
1291 	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1292 							uart_timer);
1293 	struct uart_port *port = &atmel_port->uart;
1294 
1295 	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1296 		tasklet_schedule(&atmel_port->tasklet_rx);
1297 		mod_timer(&atmel_port->uart_timer,
1298 			  jiffies + uart_poll_timeout(port));
1299 	}
1300 }
1301 
1302 /*
1303  * receive interrupt handler.
1304  */
1305 static void
1306 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1307 {
1308 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1309 
1310 	if (atmel_use_pdc_rx(port)) {
1311 		/*
1312 		 * PDC receive. Just schedule the tasklet and let it
1313 		 * figure out the details.
1314 		 *
1315 		 * TODO: We're not handling error flags correctly at
1316 		 * the moment.
1317 		 */
1318 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1319 			atmel_uart_writel(port, ATMEL_US_IDR,
1320 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1321 			atmel_tasklet_schedule(atmel_port,
1322 					       &atmel_port->tasklet_rx);
1323 		}
1324 
1325 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1326 				ATMEL_US_FRAME | ATMEL_US_PARE))
1327 			atmel_pdc_rxerr(port, pending);
1328 	}
1329 
1330 	if (atmel_use_dma_rx(port)) {
1331 		if (pending & ATMEL_US_TIMEOUT) {
1332 			atmel_uart_writel(port, ATMEL_US_IDR,
1333 					  ATMEL_US_TIMEOUT);
1334 			atmel_tasklet_schedule(atmel_port,
1335 					       &atmel_port->tasklet_rx);
1336 		}
1337 	}
1338 
1339 	/* Interrupt receive */
1340 	if (pending & ATMEL_US_RXRDY)
1341 		atmel_rx_chars(port);
1342 	else if (pending & ATMEL_US_RXBRK) {
1343 		/*
1344 		 * End of break detected. If it came along with a
1345 		 * character, atmel_rx_chars will handle it.
1346 		 */
1347 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1348 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1349 		atmel_port->break_active = 0;
1350 	}
1351 }
1352 
1353 /*
1354  * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1355  */
1356 static void
1357 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1358 {
1359 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1360 
1361 	if (pending & atmel_port->tx_done_mask) {
1362 		atmel_uart_writel(port, ATMEL_US_IDR,
1363 				  atmel_port->tx_done_mask);
1364 
1365 		/* Start RX if flag was set and FIFO is empty */
1366 		if (atmel_port->hd_start_rx) {
1367 			if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1368 					& ATMEL_US_TXEMPTY))
1369 				dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1370 
1371 			atmel_port->hd_start_rx = false;
1372 			atmel_start_rx(port);
1373 		}
1374 
1375 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1376 	}
1377 }
1378 
1379 /*
1380  * status flags interrupt handler.
1381  */
1382 static void
1383 atmel_handle_status(struct uart_port *port, unsigned int pending,
1384 		    unsigned int status)
1385 {
1386 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1387 	unsigned int status_change;
1388 
1389 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1390 				| ATMEL_US_CTSIC)) {
1391 		status_change = status ^ atmel_port->irq_status_prev;
1392 		atmel_port->irq_status_prev = status;
1393 
1394 		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1395 					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1396 			/* TODO: All reads to CSR will clear these interrupts! */
1397 			if (status_change & ATMEL_US_RI)
1398 				port->icount.rng++;
1399 			if (status_change & ATMEL_US_DSR)
1400 				port->icount.dsr++;
1401 			if (status_change & ATMEL_US_DCD)
1402 				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1403 			if (status_change & ATMEL_US_CTS)
1404 				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1405 
1406 			wake_up_interruptible(&port->state->port.delta_msr_wait);
1407 		}
1408 	}
1409 
1410 	if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION))
1411 		dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending);
1412 }
1413 
1414 /*
1415  * Interrupt handler
1416  */
1417 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1418 {
1419 	struct uart_port *port = dev_id;
1420 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1421 	unsigned int status, pending, mask, pass_counter = 0;
1422 
1423 	spin_lock(&atmel_port->lock_suspended);
1424 
1425 	do {
1426 		status = atmel_uart_readl(port, ATMEL_US_CSR);
1427 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1428 		pending = status & mask;
1429 		if (!pending)
1430 			break;
1431 
1432 		if (atmel_port->suspended) {
1433 			atmel_port->pending |= pending;
1434 			atmel_port->pending_status = status;
1435 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1436 			pm_system_wakeup();
1437 			break;
1438 		}
1439 
1440 		atmel_handle_receive(port, pending);
1441 		atmel_handle_status(port, pending, status);
1442 		atmel_handle_transmit(port, pending);
1443 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1444 
1445 	spin_unlock(&atmel_port->lock_suspended);
1446 
1447 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1448 }
1449 
1450 static void atmel_release_tx_pdc(struct uart_port *port)
1451 {
1452 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1453 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1454 
1455 	dma_unmap_single(port->dev,
1456 			 pdc->dma_addr,
1457 			 pdc->dma_size,
1458 			 DMA_TO_DEVICE);
1459 }
1460 
1461 /*
1462  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1463  */
1464 static void atmel_tx_pdc(struct uart_port *port)
1465 {
1466 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1467 	struct circ_buf *xmit = &port->state->xmit;
1468 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1469 	int count;
1470 
1471 	/* nothing left to transmit? */
1472 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1473 		return;
1474 
1475 	xmit->tail += pdc->ofs;
1476 	xmit->tail &= UART_XMIT_SIZE - 1;
1477 
1478 	port->icount.tx += pdc->ofs;
1479 	pdc->ofs = 0;
1480 
1481 	/* more to transmit - setup next transfer */
1482 
1483 	/* disable PDC transmit */
1484 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1485 
1486 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1487 		dma_sync_single_for_device(port->dev,
1488 					   pdc->dma_addr,
1489 					   pdc->dma_size,
1490 					   DMA_TO_DEVICE);
1491 
1492 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1493 		pdc->ofs = count;
1494 
1495 		atmel_uart_writel(port, ATMEL_PDC_TPR,
1496 				  pdc->dma_addr + xmit->tail);
1497 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1498 		/* re-enable PDC transmit */
1499 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1500 		/* Enable interrupts */
1501 		atmel_uart_writel(port, ATMEL_US_IER,
1502 				  atmel_port->tx_done_mask);
1503 	} else {
1504 		if (atmel_uart_is_half_duplex(port)) {
1505 			/* DMA done, stop TX, start RX for RS485 */
1506 			atmel_start_rx(port);
1507 		}
1508 	}
1509 
1510 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1511 		uart_write_wakeup(port);
1512 }
1513 
1514 static int atmel_prepare_tx_pdc(struct uart_port *port)
1515 {
1516 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1517 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1518 	struct circ_buf *xmit = &port->state->xmit;
1519 
1520 	pdc->buf = xmit->buf;
1521 	pdc->dma_addr = dma_map_single(port->dev,
1522 					pdc->buf,
1523 					UART_XMIT_SIZE,
1524 					DMA_TO_DEVICE);
1525 	pdc->dma_size = UART_XMIT_SIZE;
1526 	pdc->ofs = 0;
1527 
1528 	return 0;
1529 }
1530 
1531 static void atmel_rx_from_ring(struct uart_port *port)
1532 {
1533 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1534 	struct circ_buf *ring = &atmel_port->rx_ring;
1535 	unsigned int flg;
1536 	unsigned int status;
1537 
1538 	while (ring->head != ring->tail) {
1539 		struct atmel_uart_char c;
1540 
1541 		/* Make sure c is loaded after head. */
1542 		smp_rmb();
1543 
1544 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1545 
1546 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1547 
1548 		port->icount.rx++;
1549 		status = c.status;
1550 		flg = TTY_NORMAL;
1551 
1552 		/*
1553 		 * note that the error handling code is
1554 		 * out of the main execution path
1555 		 */
1556 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1557 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1558 			if (status & ATMEL_US_RXBRK) {
1559 				/* ignore side-effect */
1560 				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1561 
1562 				port->icount.brk++;
1563 				if (uart_handle_break(port))
1564 					continue;
1565 			}
1566 			if (status & ATMEL_US_PARE)
1567 				port->icount.parity++;
1568 			if (status & ATMEL_US_FRAME)
1569 				port->icount.frame++;
1570 			if (status & ATMEL_US_OVRE)
1571 				port->icount.overrun++;
1572 
1573 			status &= port->read_status_mask;
1574 
1575 			if (status & ATMEL_US_RXBRK)
1576 				flg = TTY_BREAK;
1577 			else if (status & ATMEL_US_PARE)
1578 				flg = TTY_PARITY;
1579 			else if (status & ATMEL_US_FRAME)
1580 				flg = TTY_FRAME;
1581 		}
1582 
1583 
1584 		if (uart_handle_sysrq_char(port, c.ch))
1585 			continue;
1586 
1587 		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1588 	}
1589 
1590 	tty_flip_buffer_push(&port->state->port);
1591 }
1592 
1593 static void atmel_release_rx_pdc(struct uart_port *port)
1594 {
1595 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1596 	int i;
1597 
1598 	for (i = 0; i < 2; i++) {
1599 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1600 
1601 		dma_unmap_single(port->dev,
1602 				 pdc->dma_addr,
1603 				 pdc->dma_size,
1604 				 DMA_FROM_DEVICE);
1605 		kfree(pdc->buf);
1606 	}
1607 }
1608 
1609 static void atmel_rx_from_pdc(struct uart_port *port)
1610 {
1611 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1612 	struct tty_port *tport = &port->state->port;
1613 	struct atmel_dma_buffer *pdc;
1614 	int rx_idx = atmel_port->pdc_rx_idx;
1615 	unsigned int head;
1616 	unsigned int tail;
1617 	unsigned int count;
1618 
1619 	do {
1620 		/* Reset the UART timeout early so that we don't miss one */
1621 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1622 
1623 		pdc = &atmel_port->pdc_rx[rx_idx];
1624 		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1625 		tail = pdc->ofs;
1626 
1627 		/* If the PDC has switched buffers, RPR won't contain
1628 		 * any address within the current buffer. Since head
1629 		 * is unsigned, we just need a one-way comparison to
1630 		 * find out.
1631 		 *
1632 		 * In this case, we just need to consume the entire
1633 		 * buffer and resubmit it for DMA. This will clear the
1634 		 * ENDRX bit as well, so that we can safely re-enable
1635 		 * all interrupts below.
1636 		 */
1637 		head = min(head, pdc->dma_size);
1638 
1639 		if (likely(head != tail)) {
1640 			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1641 					pdc->dma_size, DMA_FROM_DEVICE);
1642 
1643 			/*
1644 			 * head will only wrap around when we recycle
1645 			 * the DMA buffer, and when that happens, we
1646 			 * explicitly set tail to 0. So head will
1647 			 * always be greater than tail.
1648 			 */
1649 			count = head - tail;
1650 
1651 			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1652 						count);
1653 
1654 			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1655 					pdc->dma_size, DMA_FROM_DEVICE);
1656 
1657 			port->icount.rx += count;
1658 			pdc->ofs = head;
1659 		}
1660 
1661 		/*
1662 		 * If the current buffer is full, we need to check if
1663 		 * the next one contains any additional data.
1664 		 */
1665 		if (head >= pdc->dma_size) {
1666 			pdc->ofs = 0;
1667 			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1668 			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1669 
1670 			rx_idx = !rx_idx;
1671 			atmel_port->pdc_rx_idx = rx_idx;
1672 		}
1673 	} while (head >= pdc->dma_size);
1674 
1675 	tty_flip_buffer_push(tport);
1676 
1677 	atmel_uart_writel(port, ATMEL_US_IER,
1678 			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1679 }
1680 
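/*
 * PDC RX uses two PDC_BUFFER_SIZE buffers in a ping-pong scheme: one is the
 * current buffer (RPR/RCR), the other is the next buffer (RNPR/RNCR). When
 * the current buffer fills up, the PDC switches over automatically and the
 * driver re-queues the consumed buffer as the new "next" one.
 */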
1681 static int atmel_prepare_rx_pdc(struct uart_port *port)
1682 {
1683 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1684 	int i;
1685 
1686 	for (i = 0; i < 2; i++) {
1687 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1688 
1689 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1690 		if (pdc->buf == NULL) {
1691 			if (i != 0) {
1692 				dma_unmap_single(port->dev,
1693 					atmel_port->pdc_rx[0].dma_addr,
1694 					PDC_BUFFER_SIZE,
1695 					DMA_FROM_DEVICE);
1696 				kfree(atmel_port->pdc_rx[0].buf);
1697 			}
1698 			atmel_port->use_pdc_rx = false;
1699 			return -ENOMEM;
1700 		}
1701 		pdc->dma_addr = dma_map_single(port->dev,
1702 						pdc->buf,
1703 						PDC_BUFFER_SIZE,
1704 						DMA_FROM_DEVICE);
1705 		pdc->dma_size = PDC_BUFFER_SIZE;
1706 		pdc->ofs = 0;
1707 	}
1708 
1709 	atmel_port->pdc_rx_idx = 0;
1710 
1711 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1712 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1713 
1714 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1715 			  atmel_port->pdc_rx[1].dma_addr);
1716 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1717 
1718 	return 0;
1719 }
1720 
1721 /*
1722  * tasklet handling tty stuff outside the interrupt handler.
1723  */
1724 static void atmel_tasklet_rx_func(struct tasklet_struct *t)
1725 {
1726 	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1727 							  tasklet_rx);
1728 	struct uart_port *port = &atmel_port->uart;
1729 
1730 	/* The interrupt handler does not take the lock */
1731 	spin_lock(&port->lock);
1732 	atmel_port->schedule_rx(port);
1733 	spin_unlock(&port->lock);
1734 }
1735 
1736 static void atmel_tasklet_tx_func(struct tasklet_struct *t)
1737 {
1738 	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1739 							  tasklet_tx);
1740 	struct uart_port *port = &atmel_port->uart;
1741 
1742 	/* The interrupt handler does not take the lock */
1743 	spin_lock(&port->lock);
1744 	atmel_port->schedule_tx(port);
1745 	spin_unlock(&port->lock);
1746 }
1747 
1748 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1749 				struct platform_device *pdev)
1750 {
1751 	struct device_node *np = pdev->dev.of_node;
1752 
1753 	/* DMA/PDC usage specification */
1754 	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1755 		if (of_property_read_bool(np, "dmas")) {
1756 			atmel_port->use_dma_rx  = true;
1757 			atmel_port->use_pdc_rx  = false;
1758 		} else {
1759 			atmel_port->use_dma_rx  = false;
1760 			atmel_port->use_pdc_rx  = true;
1761 		}
1762 	} else {
1763 		atmel_port->use_dma_rx  = false;
1764 		atmel_port->use_pdc_rx  = false;
1765 	}
1766 
1767 	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1768 		if (of_property_read_bool(np, "dmas")) {
1769 			atmel_port->use_dma_tx  = true;
1770 			atmel_port->use_pdc_tx  = false;
1771 		} else {
1772 			atmel_port->use_dma_tx  = false;
1773 			atmel_port->use_pdc_tx  = true;
1774 		}
1775 	} else {
1776 		atmel_port->use_dma_tx  = false;
1777 		atmel_port->use_pdc_tx  = false;
1778 	}
1779 }
1780 
1781 static void atmel_set_ops(struct uart_port *port)
1782 {
1783 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1784 
1785 	if (atmel_use_dma_rx(port)) {
1786 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1787 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1788 		atmel_port->release_rx = &atmel_release_rx_dma;
1789 	} else if (atmel_use_pdc_rx(port)) {
1790 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1791 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1792 		atmel_port->release_rx = &atmel_release_rx_pdc;
1793 	} else {
1794 		atmel_port->prepare_rx = NULL;
1795 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1796 		atmel_port->release_rx = NULL;
1797 	}
1798 
1799 	if (atmel_use_dma_tx(port)) {
1800 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1801 		atmel_port->schedule_tx = &atmel_tx_dma;
1802 		atmel_port->release_tx = &atmel_release_tx_dma;
1803 	} else if (atmel_use_pdc_tx(port)) {
1804 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1805 		atmel_port->schedule_tx = &atmel_tx_pdc;
1806 		atmel_port->release_tx = &atmel_release_tx_pdc;
1807 	} else {
1808 		atmel_port->prepare_tx = NULL;
1809 		atmel_port->schedule_tx = &atmel_tx_chars;
1810 		atmel_port->release_tx = NULL;
1811 	}
1812 }
1813 
1814 /*
1815  * Get the IP name: usart or uart
1816  */
1817 static void atmel_get_ip_name(struct uart_port *port)
1818 {
1819 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1820 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1821 	u32 version;
1822 	u32 usart, dbgu_uart, new_uart;
1823 	/* ASCII decoding for IP version */
1824 	usart = 0x55534152;	/* USAR(T) */
1825 	dbgu_uart = 0x44424755;	/* DBGU */
1826 	new_uart = 0x55415254;	/* UART */
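	/*
	 * Each constant packs the four ASCII characters of the IP name MSB
	 * first, e.g. 'U', 'A', 'R', 'T' -> 0x55415254, matching the value
	 * read back from the NAME register.
	 */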
1827 
1828 	/*
	 * Only USART devices from the at91sam9260 SoC implement the
	 * fractional baud rate. It is available for all asynchronous modes,
	 * with the following restriction: the sampling clock's duty cycle is
	 * not constant.
1833 	 */
1834 	atmel_port->has_frac_baudrate = false;
1835 	atmel_port->has_hw_timer = false;
1836 	atmel_port->is_usart = false;
1837 
1838 	if (name == new_uart) {
		dev_dbg(port->dev, "Uart with hw timer\n");
1840 		atmel_port->has_hw_timer = true;
1841 		atmel_port->rtor = ATMEL_UA_RTOR;
1842 	} else if (name == usart) {
1843 		dev_dbg(port->dev, "Usart\n");
1844 		atmel_port->has_frac_baudrate = true;
1845 		atmel_port->has_hw_timer = true;
1846 		atmel_port->is_usart = true;
1847 		atmel_port->rtor = ATMEL_US_RTOR;
1848 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1849 		switch (version) {
1850 		case 0x814:	/* sama5d2 */
1851 			fallthrough;
1852 		case 0x701:	/* sama5d4 */
1853 			atmel_port->fidi_min = 3;
1854 			atmel_port->fidi_max = 65535;
1855 			break;
1856 		case 0x502:	/* sam9x5, sama5d3 */
1857 			atmel_port->fidi_min = 3;
1858 			atmel_port->fidi_max = 2047;
1859 			break;
1860 		default:
1861 			atmel_port->fidi_min = 1;
1862 			atmel_port->fidi_max = 2047;
1863 		}
1864 	} else if (name == dbgu_uart) {
1865 		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1866 	} else {
1867 		/* fallback for older SoCs: use version field */
1868 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1869 		switch (version) {
1870 		case 0x302:
1871 		case 0x10213:
1872 		case 0x10302:
1873 			dev_dbg(port->dev, "This version is usart\n");
1874 			atmel_port->has_frac_baudrate = true;
1875 			atmel_port->has_hw_timer = true;
1876 			atmel_port->is_usart = true;
1877 			atmel_port->rtor = ATMEL_US_RTOR;
1878 			break;
1879 		case 0x203:
1880 		case 0x10202:
1881 			dev_dbg(port->dev, "This version is uart\n");
1882 			break;
1883 		default:
			dev_err(port->dev, "Unsupported IP name and version, defaulting to uart\n");
1885 		}
1886 	}
1887 }
1888 
1889 /*
1890  * Perform initialization and enable port for reception
1891  */
1892 static int atmel_startup(struct uart_port *port)
1893 {
1894 	struct platform_device *pdev = to_platform_device(port->dev);
1895 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1896 	int retval;
1897 
1898 	/*
1899 	 * Ensure that no interrupts are enabled otherwise when
1900 	 * request_irq() is called we could get stuck trying to
1901 	 * handle an unexpected interrupt
1902 	 */
1903 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1904 	atmel_port->ms_irq_enabled = false;
1905 
1906 	/*
1907 	 * Allocate the IRQ
1908 	 */
1909 	retval = request_irq(port->irq, atmel_interrupt,
1910 			     IRQF_SHARED | IRQF_COND_SUSPEND,
1911 			     dev_name(&pdev->dev), port);
1912 	if (retval) {
1913 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1914 		return retval;
1915 	}
1916 
1917 	atomic_set(&atmel_port->tasklet_shutdown, 0);
1918 	tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func);
1919 	tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func);
1920 
1921 	/*
1922 	 * Initialize DMA (if necessary)
1923 	 */
1924 	atmel_init_property(atmel_port, pdev);
1925 	atmel_set_ops(port);
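	/*
	 * If DMA or PDC preparation fails below, the prepare callback is
	 * expected to clear the corresponding use_dma/use_pdc flag, so the
	 * second atmel_set_ops() call falls back to the PIO handlers.
	 */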
1926 
1927 	if (atmel_port->prepare_rx) {
1928 		retval = atmel_port->prepare_rx(port);
1929 		if (retval < 0)
1930 			atmel_set_ops(port);
1931 	}
1932 
1933 	if (atmel_port->prepare_tx) {
1934 		retval = atmel_port->prepare_tx(port);
1935 		if (retval < 0)
1936 			atmel_set_ops(port);
1937 	}
1938 
1939 	/*
1940 	 * Enable FIFO when available
1941 	 */
1942 	if (atmel_port->fifo_size) {
1943 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1944 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1945 		unsigned int fmr;
1946 
1947 		atmel_uart_writel(port, ATMEL_US_CR,
1948 				  ATMEL_US_FIFOEN |
1949 				  ATMEL_US_RXFCLR |
1950 				  ATMEL_US_TXFLCLR);
1951 
1952 		if (atmel_use_dma_tx(port))
1953 			txrdym = ATMEL_US_FOUR_DATA;
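		/*
		 * TXRDYM/RXRDYM set the FIFO thresholds at which TXRDY/RXRDY
		 * are reported; with TX DMA the TXRDY threshold is raised
		 * from one to four data.
		 */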
1954 
1955 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1956 		if (atmel_port->rts_high &&
1957 		    atmel_port->rts_low)
1958 			fmr |=	ATMEL_US_FRTSC |
1959 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1960 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1961 
1962 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1963 	}
1964 
	/* Save the current CSR for later comparison when handling status changes */
1966 	atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR);
1967 
1968 	/*
1969 	 * Finally, enable the serial port
1970 	 */
1971 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1972 	/* enable xmit & rcvr */
1973 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1974 	atmel_port->tx_stopped = false;
1975 
1976 	timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1977 
1978 	if (atmel_use_pdc_rx(port)) {
1979 		/* set UART timeout */
1980 		if (!atmel_port->has_hw_timer) {
1981 			mod_timer(&atmel_port->uart_timer,
1982 					jiffies + uart_poll_timeout(port));
1983 		/* set USART timeout */
1984 		} else {
1985 			atmel_uart_writel(port, atmel_port->rtor,
1986 					  PDC_RX_TIMEOUT);
1987 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1988 
1989 			atmel_uart_writel(port, ATMEL_US_IER,
1990 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1991 		}
1992 		/* enable PDC controller */
1993 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1994 	} else if (atmel_use_dma_rx(port)) {
1995 		/* set UART timeout */
1996 		if (!atmel_port->has_hw_timer) {
1997 			mod_timer(&atmel_port->uart_timer,
1998 					jiffies + uart_poll_timeout(port));
1999 		/* set USART timeout */
2000 		} else {
2001 			atmel_uart_writel(port, atmel_port->rtor,
2002 					  PDC_RX_TIMEOUT);
2003 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
2004 
2005 			atmel_uart_writel(port, ATMEL_US_IER,
2006 					  ATMEL_US_TIMEOUT);
2007 		}
2008 	} else {
2009 		/* enable receive only */
2010 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
2011 	}
2012 
2013 	return 0;
2014 }
2015 
2016 /*
2017  * Flush any TX data submitted for DMA. Called when the TX circular
2018  * buffer is reset.
2019  */
2020 static void atmel_flush_buffer(struct uart_port *port)
2021 {
2022 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2023 
2024 	if (atmel_use_pdc_tx(port)) {
2025 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
2026 		atmel_port->pdc_tx.ofs = 0;
2027 	}
2028 	/*
2029 	 * in uart_flush_buffer(), the xmit circular buffer has just
2030 	 * been cleared, so we have to reset tx_len accordingly.
2031 	 */
2032 	atmel_port->tx_len = 0;
2033 }
2034 
2035 /*
2036  * Disable the port
2037  */
2038 static void atmel_shutdown(struct uart_port *port)
2039 {
2040 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2041 
2042 	/* Disable modem control lines interrupts */
	/* Disable modem control line interrupts */
2044 
2045 	/* Disable interrupts at device level */
2046 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2047 
2048 	/* Prevent spurious interrupts from scheduling the tasklet */
2049 	atomic_inc(&atmel_port->tasklet_shutdown);
2050 
2051 	/*
2052 	 * Prevent any tasklets being scheduled during
2053 	 * cleanup
2054 	 */
2055 	del_timer_sync(&atmel_port->uart_timer);
2056 
	/* Make sure that no interrupt is in flight */
2058 	synchronize_irq(port->irq);
2059 
2060 	/*
2061 	 * Clear out any scheduled tasklets before
2062 	 * we destroy the buffers
2063 	 */
2064 	tasklet_kill(&atmel_port->tasklet_rx);
2065 	tasklet_kill(&atmel_port->tasklet_tx);
2066 
2067 	/*
	 * Ensure everything is stopped and that the port and break
	 * condition are disabled.
2070 	 */
2071 	atmel_stop_rx(port);
2072 	atmel_stop_tx(port);
2073 
2074 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2075 
2076 	/*
2077 	 * Shut-down the DMA.
2078 	 */
2079 	if (atmel_port->release_rx)
2080 		atmel_port->release_rx(port);
2081 	if (atmel_port->release_tx)
2082 		atmel_port->release_tx(port);
2083 
2084 	/*
2085 	 * Reset ring buffer pointers
2086 	 */
2087 	atmel_port->rx_ring.head = 0;
2088 	atmel_port->rx_ring.tail = 0;
2089 
2090 	/*
2091 	 * Free the interrupts
2092 	 */
2093 	free_irq(port->irq, port);
2094 
2095 	atmel_flush_buffer(port);
2096 }
2097 
2098 /*
2099  * Power / Clock management.
2100  */
2101 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2102 			    unsigned int oldstate)
2103 {
2104 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2105 
2106 	switch (state) {
2107 	case UART_PM_STATE_ON:
2108 		/*
2109 		 * Enable the peripheral clock for this serial port.
2110 		 * This is called on uart_open() or a resume event.
2111 		 */
2112 		clk_prepare_enable(atmel_port->clk);
2113 
2114 		/* re-enable interrupts if we disabled some on suspend */
2115 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2116 		break;
2117 	case UART_PM_STATE_OFF:
2118 		/* Back up the interrupt mask and disable all interrupts */
2119 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2120 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
2121 
2122 		/*
2123 		 * Disable the peripheral clock for this serial port.
2124 		 * This is called on uart_close() or a suspend event.
2125 		 */
2126 		clk_disable_unprepare(atmel_port->clk);
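		/*
		 * The generic clock is only running if set_termios selected
		 * it as the baud rate source, so only disable it in that case.
		 */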
2127 		if (__clk_is_enabled(atmel_port->gclk))
2128 			clk_disable_unprepare(atmel_port->gclk);
2129 		break;
2130 	default:
2131 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2132 	}
2133 }
2134 
2135 /*
2136  * Change the port parameters
2137  */
2138 static void atmel_set_termios(struct uart_port *port,
2139 			      struct ktermios *termios,
2140 			      const struct ktermios *old)
2141 {
2142 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2143 	unsigned long flags;
2144 	unsigned int old_mode, mode, imr, quot, div, cd, fp = 0;
2145 	unsigned int baud, actual_baud, gclk_rate;
2146 	int ret;
2147 
2148 	/* save the current mode register */
2149 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2150 
2151 	/* reset the mode, clock divisor, parity, stop bits and data size */
2152 	if (atmel_port->is_usart)
2153 		mode &= ~(ATMEL_US_NBSTOP | ATMEL_US_PAR | ATMEL_US_CHRL |
2154 			  ATMEL_US_USCLKS | ATMEL_US_USMODE);
2155 	else
2156 		mode &= ~(ATMEL_UA_BRSRCCK | ATMEL_US_PAR | ATMEL_UA_FILTER);
2157 
2158 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2159 
2160 	/* byte size */
2161 	switch (termios->c_cflag & CSIZE) {
2162 	case CS5:
2163 		mode |= ATMEL_US_CHRL_5;
2164 		break;
2165 	case CS6:
2166 		mode |= ATMEL_US_CHRL_6;
2167 		break;
2168 	case CS7:
2169 		mode |= ATMEL_US_CHRL_7;
2170 		break;
2171 	default:
2172 		mode |= ATMEL_US_CHRL_8;
2173 		break;
2174 	}
2175 
2176 	/* stop bits */
2177 	if (termios->c_cflag & CSTOPB)
2178 		mode |= ATMEL_US_NBSTOP_2;
2179 
2180 	/* parity */
2181 	if (termios->c_cflag & PARENB) {
2182 		/* Mark or Space parity */
2183 		if (termios->c_cflag & CMSPAR) {
2184 			if (termios->c_cflag & PARODD)
2185 				mode |= ATMEL_US_PAR_MARK;
2186 			else
2187 				mode |= ATMEL_US_PAR_SPACE;
2188 		} else if (termios->c_cflag & PARODD)
2189 			mode |= ATMEL_US_PAR_ODD;
2190 		else
2191 			mode |= ATMEL_US_PAR_EVEN;
2192 	} else
2193 		mode |= ATMEL_US_PAR_NONE;
2194 
2195 	spin_lock_irqsave(&port->lock, flags);
2196 
2197 	port->read_status_mask = ATMEL_US_OVRE;
2198 	if (termios->c_iflag & INPCK)
2199 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2200 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2201 		port->read_status_mask |= ATMEL_US_RXBRK;
2202 
2203 	if (atmel_use_pdc_rx(port))
2204 		/* need to enable error interrupts */
2205 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2206 
2207 	/*
2208 	 * Characters to ignore
2209 	 */
2210 	port->ignore_status_mask = 0;
2211 	if (termios->c_iflag & IGNPAR)
2212 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2213 	if (termios->c_iflag & IGNBRK) {
2214 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2215 		/*
2216 		 * If we're ignoring parity and break indicators,
2217 		 * ignore overruns too (for real raw support).
2218 		 */
2219 		if (termios->c_iflag & IGNPAR)
2220 			port->ignore_status_mask |= ATMEL_US_OVRE;
2221 	}
	/* TODO: Ignore all received characters if CREAD is not set. */
2223 
2224 	/* update the per-port timeout */
2225 	uart_update_timeout(port, termios->c_cflag, baud);
2226 
2227 	/*
2228 	 * save/disable interrupts. The tty layer will ensure that the
2229 	 * transmitter is empty if requested by the caller, so there's
2230 	 * no need to wait for it here.
2231 	 */
2232 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2233 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2234 
2235 	/* disable receiver and transmitter */
2236 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2237 	atmel_port->tx_stopped = true;
2238 
2239 	/* mode */
2240 	if (port->rs485.flags & SER_RS485_ENABLED) {
2241 		atmel_uart_writel(port, ATMEL_US_TTGR,
2242 				  port->rs485.delay_rts_after_send);
2243 		mode |= ATMEL_US_USMODE_RS485;
2244 	} else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
2245 		atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
		/* select the MCK clock, and enable the clock output */
2247 		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
2248 		/* set max iterations */
2249 		mode |= ATMEL_US_MAX_ITER(3);
2250 		if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
2251 				== SER_ISO7816_T(0))
2252 			mode |= ATMEL_US_USMODE_ISO7816_T0;
2253 		else
2254 			mode |= ATMEL_US_USMODE_ISO7816_T1;
2255 	} else if (termios->c_cflag & CRTSCTS) {
2256 		/* RS232 with hardware handshake (RTS/CTS) */
2257 		if (atmel_use_fifo(port) &&
2258 		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2259 			/*
2260 			 * with ATMEL_US_USMODE_HWHS set, the controller will
2261 			 * be able to drive the RTS pin high/low when the RX
2262 			 * FIFO is above RXFTHRES/below RXFTHRES2.
2263 			 * It will also disable the transmitter when the CTS
2264 			 * pin is high.
			 * This mode is not activated if the CTS pin is a GPIO
			 * because in that case the transmitter is always
			 * disabled (there must be an internal pull-up
			 * responsible for this behaviour).
2269 			 * If the RTS pin is a GPIO, the controller won't be
2270 			 * able to drive it according to the FIFO thresholds,
2271 			 * but it will be handled by the driver.
2272 			 */
2273 			mode |= ATMEL_US_USMODE_HWHS;
2274 		} else {
2275 			/*
2276 			 * For platforms without FIFO, the flow control is
2277 			 * handled by the driver.
2278 			 */
2279 			mode |= ATMEL_US_USMODE_NORMAL;
2280 		}
2281 	} else {
		/* RS232 without hardware handshake */
2283 		mode |= ATMEL_US_USMODE_NORMAL;
2284 	}
2285 
2286 	/*
2287 	 * Set the baud rate:
	 * The fractional baud rate allows the output frequency to be set up
	 * more accurately. This feature is enabled only when using normal mode.
2290 	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2291 	 * Currently, OVER is always set to 0 so we get
2292 	 * baudrate = selected clock / (16 * (CD + FP / 8))
2293 	 * then
2294 	 * 8 CD + FP = selected clock / (2 * baudrate)
2295 	 */
2296 	if (atmel_port->has_frac_baudrate) {
2297 		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2298 		cd = div >> 3;
2299 		fp = div & ATMEL_US_FP_MASK;
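		/*
		 * Example: with a 132 MHz uartclk and a 115200 baud request,
		 * div rounds to 573, so CD = 71 and FP = 5, which yields
		 * roughly 115183 baud.
		 */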
2300 	} else {
2301 		cd = uart_get_divisor(port, baud);
2302 	}
2303 
2304 	/*
2305 	 * If the current value of the Clock Divisor surpasses the 16 bit
2306 	 * ATMEL_US_CD mask and the IP is USART, switch to the Peripheral
2307 	 * Clock implicitly divided by 8.
	 * If the IP is a UART however, keep the highest possible value for
	 * CD and avoid needlessly dividing it, since UART IPs do not support
	 * implicit division of the Peripheral Clock.
2311 	 */
2312 	if (atmel_port->is_usart && cd > ATMEL_US_CD) {
2313 		cd /= 8;
2314 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2315 	} else {
2316 		cd = min_t(unsigned int, cd, ATMEL_US_CD);
2317 	}
2318 
2319 	/*
	 * If there is no Fractional Part, there is a good chance that we can
	 * generate a baud rate closer to the desired one by using the GCLK as
	 * the clock source driving the baud rate generator.
2324 	 */
2325 	if (!atmel_port->has_frac_baudrate) {
2326 		if (__clk_is_enabled(atmel_port->gclk))
2327 			clk_disable_unprepare(atmel_port->gclk);
2328 		gclk_rate = clk_round_rate(atmel_port->gclk, 16 * baud);
2329 		actual_baud = clk_get_rate(atmel_port->clk) / (16 * cd);
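		/*
		 * Only switch to GCLK when its rounded rate gives a smaller
		 * baud rate error than the peripheral clock does with the CD
		 * computed above.
		 */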
2330 		if (gclk_rate && abs(atmel_error_rate(baud, actual_baud)) >
2331 		    abs(atmel_error_rate(baud, gclk_rate / 16))) {
2332 			clk_set_rate(atmel_port->gclk, 16 * baud);
2333 			ret = clk_prepare_enable(atmel_port->gclk);
2334 			if (ret)
2335 				goto gclk_fail;
2336 
2337 			if (atmel_port->is_usart) {
2338 				mode &= ~ATMEL_US_USCLKS;
2339 				mode |= ATMEL_US_USCLKS_GCLK;
2340 			} else {
2341 				mode |= ATMEL_UA_BRSRCCK;
2342 			}
2343 
			/*
			 * Set the Clock Divisor for GCLK to 1.
			 * Since we were able to generate the smallest multiple
			 * of 16 times the desired baud rate, any larger
			 * multiple would give the same error rate with a
			 * proportionally larger CD, so there is no need to
			 * consider a higher value for CD.
			 */
2353 			cd = 1;
2354 		}
2355 	}
2356 
2357 gclk_fail:
2358 	quot = cd | fp << ATMEL_US_FP_OFFSET;
2359 
2360 	if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
2361 		atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2362 
2363 	/* set the mode, clock divisor, parity, stop bits and data size */
2364 	atmel_uart_writel(port, ATMEL_US_MR, mode);
2365 
2366 	/*
2367 	 * when switching the mode, set the RTS line state according to the
2368 	 * new mode, otherwise keep the former state
2369 	 */
2370 	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2371 		unsigned int rts_state;
2372 
2373 		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2374 			/* let the hardware control the RTS line */
2375 			rts_state = ATMEL_US_RTSDIS;
2376 		} else {
2377 			/* force RTS line to low level */
2378 			rts_state = ATMEL_US_RTSEN;
2379 		}
2380 
2381 		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2382 	}
2383 
2384 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2385 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2386 	atmel_port->tx_stopped = false;
2387 
2388 	/* restore interrupts */
2389 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2390 
2391 	/* CTS flow-control and modem-status interrupts */
2392 	if (UART_ENABLE_MS(port, termios->c_cflag))
2393 		atmel_enable_ms(port);
2394 	else
2395 		atmel_disable_ms(port);
2396 
2397 	spin_unlock_irqrestore(&port->lock, flags);
2398 }
2399 
2400 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2401 {
2402 	if (termios->c_line == N_PPS) {
2403 		port->flags |= UPF_HARDPPS_CD;
2404 		spin_lock_irq(&port->lock);
2405 		atmel_enable_ms(port);
2406 		spin_unlock_irq(&port->lock);
2407 	} else {
2408 		port->flags &= ~UPF_HARDPPS_CD;
2409 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2410 			spin_lock_irq(&port->lock);
2411 			atmel_disable_ms(port);
2412 			spin_unlock_irq(&port->lock);
2413 		}
2414 	}
2415 }
2416 
2417 /*
2418  * Return string describing the specified port
2419  */
2420 static const char *atmel_type(struct uart_port *port)
2421 {
2422 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2423 }
2424 
2425 /*
2426  * Release the memory region(s) being used by 'port'.
2427  */
2428 static void atmel_release_port(struct uart_port *port)
2429 {
2430 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2431 	int size = resource_size(mpdev->resource);
2432 
2433 	release_mem_region(port->mapbase, size);
2434 
2435 	if (port->flags & UPF_IOREMAP) {
2436 		iounmap(port->membase);
2437 		port->membase = NULL;
2438 	}
2439 }
2440 
2441 /*
2442  * Request the memory region(s) being used by 'port'.
2443  */
2444 static int atmel_request_port(struct uart_port *port)
2445 {
2446 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2447 	int size = resource_size(mpdev->resource);
2448 
2449 	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2450 		return -EBUSY;
2451 
2452 	if (port->flags & UPF_IOREMAP) {
2453 		port->membase = ioremap(port->mapbase, size);
2454 		if (port->membase == NULL) {
2455 			release_mem_region(port->mapbase, size);
2456 			return -ENOMEM;
2457 		}
2458 	}
2459 
2460 	return 0;
2461 }
2462 
2463 /*
2464  * Configure/autoconfigure the port.
2465  */
2466 static void atmel_config_port(struct uart_port *port, int flags)
2467 {
2468 	if (flags & UART_CONFIG_TYPE) {
2469 		port->type = PORT_ATMEL;
2470 		atmel_request_port(port);
2471 	}
2472 }
2473 
2474 /*
2475  * Verify the new serial_struct (for TIOCSSERIAL).
2476  */
2477 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2478 {
2479 	int ret = 0;
2480 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2481 		ret = -EINVAL;
2482 	if (port->irq != ser->irq)
2483 		ret = -EINVAL;
2484 	if (ser->io_type != SERIAL_IO_MEM)
2485 		ret = -EINVAL;
2486 	if (port->uartclk / 16 != ser->baud_base)
2487 		ret = -EINVAL;
2488 	if (port->mapbase != (unsigned long)ser->iomem_base)
2489 		ret = -EINVAL;
2490 	if (port->iobase != ser->port)
2491 		ret = -EINVAL;
2492 	if (ser->hub6 != 0)
2493 		ret = -EINVAL;
2494 	return ret;
2495 }
2496 
2497 #ifdef CONFIG_CONSOLE_POLL
2498 static int atmel_poll_get_char(struct uart_port *port)
2499 {
2500 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2501 		cpu_relax();
2502 
2503 	return atmel_uart_read_char(port);
2504 }
2505 
2506 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2507 {
2508 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2509 		cpu_relax();
2510 
2511 	atmel_uart_write_char(port, ch);
2512 }
2513 #endif
2514 
2515 static const struct uart_ops atmel_pops = {
2516 	.tx_empty	= atmel_tx_empty,
2517 	.set_mctrl	= atmel_set_mctrl,
2518 	.get_mctrl	= atmel_get_mctrl,
2519 	.stop_tx	= atmel_stop_tx,
2520 	.start_tx	= atmel_start_tx,
2521 	.stop_rx	= atmel_stop_rx,
2522 	.enable_ms	= atmel_enable_ms,
2523 	.break_ctl	= atmel_break_ctl,
2524 	.startup	= atmel_startup,
2525 	.shutdown	= atmel_shutdown,
2526 	.flush_buffer	= atmel_flush_buffer,
2527 	.set_termios	= atmel_set_termios,
2528 	.set_ldisc	= atmel_set_ldisc,
2529 	.type		= atmel_type,
2530 	.release_port	= atmel_release_port,
2531 	.request_port	= atmel_request_port,
2532 	.config_port	= atmel_config_port,
2533 	.verify_port	= atmel_verify_port,
2534 	.pm		= atmel_serial_pm,
2535 #ifdef CONFIG_CONSOLE_POLL
2536 	.poll_get_char	= atmel_poll_get_char,
2537 	.poll_put_char	= atmel_poll_put_char,
2538 #endif
2539 };
2540 
2541 static const struct serial_rs485 atmel_rs485_supported = {
2542 	.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
2543 	.delay_rts_before_send = 1,
2544 	.delay_rts_after_send = 1,
2545 };
2546 
2547 /*
2548  * Configure the port from the platform device resource info.
2549  */
2550 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2551 				      struct platform_device *pdev)
2552 {
2553 	int ret;
2554 	struct uart_port *port = &atmel_port->uart;
2555 	struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
2556 
2557 	atmel_init_property(atmel_port, pdev);
2558 	atmel_set_ops(port);
2559 
2560 	port->iotype		= UPIO_MEM;
2561 	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2562 	port->ops		= &atmel_pops;
2563 	port->fifosize		= 1;
2564 	port->dev		= &pdev->dev;
2565 	port->mapbase		= mpdev->resource[0].start;
2566 	port->irq		= platform_get_irq(mpdev, 0);
2567 	port->rs485_config	= atmel_config_rs485;
2568 	port->rs485_supported	= atmel_rs485_supported;
2569 	port->iso7816_config	= atmel_config_iso7816;
2570 	port->membase		= NULL;
2571 
2572 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2573 
2574 	ret = uart_get_rs485_mode(port);
2575 	if (ret)
2576 		return ret;
2577 
2578 	port->uartclk = clk_get_rate(atmel_port->clk);
2579 
2580 	/*
	 * Use TXEMPTY as the TX interrupt source in RS485 or ISO7816 mode,
	 * otherwise use TXRDY or ENDTX|TXBUFE.
2583 	 */
2584 	if (atmel_uart_is_half_duplex(port))
2585 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2586 	else if (atmel_use_pdc_tx(port)) {
2587 		port->fifosize = PDC_BUFFER_SIZE;
2588 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2589 	} else {
2590 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2591 	}
2592 
2593 	return 0;
2594 }
2595 
2596 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2597 static void atmel_console_putchar(struct uart_port *port, unsigned char ch)
2598 {
2599 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2600 		cpu_relax();
2601 	atmel_uart_write_char(port, ch);
2602 }
2603 
2604 /*
2605  * Interrupts are disabled on entering
2606  */
2607 static void atmel_console_write(struct console *co, const char *s, u_int count)
2608 {
2609 	struct uart_port *port = &atmel_ports[co->index].uart;
2610 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2611 	unsigned int status, imr;
2612 	unsigned int pdc_tx;
2613 
2614 	/*
2615 	 * First, save IMR and then disable interrupts
2616 	 */
2617 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2618 	atmel_uart_writel(port, ATMEL_US_IDR,
2619 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2620 
2621 	/* Store PDC transmit status and disable it */
2622 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2623 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2624 
2625 	/* Make sure that tx path is actually able to send characters */
2626 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2627 	atmel_port->tx_stopped = false;
2628 
2629 	uart_console_write(port, s, count, atmel_console_putchar);
2630 
2631 	/*
	 * Finally, wait for the transmit holding register to become empty
	 * (TXRDY) and restore IMR
2634 	 */
2635 	do {
2636 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2637 	} while (!(status & ATMEL_US_TXRDY));
2638 
2639 	/* Restore PDC transmit status */
2640 	if (pdc_tx)
2641 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2642 
2643 	/* set interrupts back the way they were */
2644 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2645 }
2646 
2647 /*
 * If the port was already initialised (e.g. by a boot loader),
2649  * try to determine the current setup.
2650  */
2651 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2652 					     int *parity, int *bits)
2653 {
2654 	unsigned int mr, quot;
2655 
2656 	/*
2657 	 * If the baud rate generator isn't running, the port wasn't
2658 	 * initialized by the boot loader.
2659 	 */
2660 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2661 	if (!quot)
2662 		return;
2663 
2664 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2665 	if (mr == ATMEL_US_CHRL_8)
2666 		*bits = 8;
2667 	else
2668 		*bits = 7;
2669 
2670 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2671 	if (mr == ATMEL_US_PAR_EVEN)
2672 		*parity = 'e';
2673 	else if (mr == ATMEL_US_PAR_ODD)
2674 		*parity = 'o';
2675 
2676 	/*
2677 	 * The serial core only rounds down when matching this to a
2678 	 * supported baud rate. Make sure we don't end up slightly
2679 	 * lower than one of those, as it would make us fall through
2680 	 * to a much lower baud rate than we really want.
2681 	 */
2682 	*baud = port->uartclk / (16 * (quot - 1));
2683 }
2684 
2685 static int __init atmel_console_setup(struct console *co, char *options)
2686 {
2687 	struct uart_port *port = &atmel_ports[co->index].uart;
2688 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2689 	int baud = 115200;
2690 	int bits = 8;
2691 	int parity = 'n';
2692 	int flow = 'n';
2693 
2694 	if (port->membase == NULL) {
2695 		/* Port not initialized yet - delay setup */
2696 		return -ENODEV;
2697 	}
2698 
2699 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2700 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2701 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2702 	atmel_port->tx_stopped = false;
2703 
2704 	if (options)
2705 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2706 	else
2707 		atmel_console_get_options(port, &baud, &parity, &bits);
2708 
2709 	return uart_set_options(port, co, baud, parity, bits, flow);
2710 }
2711 
2712 static struct uart_driver atmel_uart;
2713 
2714 static struct console atmel_console = {
2715 	.name		= ATMEL_DEVICENAME,
2716 	.write		= atmel_console_write,
2717 	.device		= uart_console_device,
2718 	.setup		= atmel_console_setup,
2719 	.flags		= CON_PRINTBUFFER,
2720 	.index		= -1,
2721 	.data		= &atmel_uart,
2722 };
2723 
2724 static void atmel_serial_early_write(struct console *con, const char *s,
2725 				     unsigned int n)
2726 {
2727 	struct earlycon_device *dev = con->data;
2728 
2729 	uart_console_write(&dev->port, s, n, atmel_console_putchar);
2730 }
2731 
2732 static int __init atmel_early_console_setup(struct earlycon_device *device,
2733 					    const char *options)
2734 {
2735 	if (!device->port.membase)
2736 		return -ENODEV;
2737 
2738 	device->con->write = atmel_serial_early_write;
2739 
2740 	return 0;
2741 }
2742 
2743 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91rm9200-usart",
2744 		    atmel_early_console_setup);
2745 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91sam9260-usart",
2746 		    atmel_early_console_setup);
2747 
2748 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2749 
2750 #else
2751 #define ATMEL_CONSOLE_DEVICE	NULL
2752 #endif
2753 
2754 static struct uart_driver atmel_uart = {
2755 	.owner		= THIS_MODULE,
2756 	.driver_name	= "atmel_serial",
2757 	.dev_name	= ATMEL_DEVICENAME,
2758 	.major		= SERIAL_ATMEL_MAJOR,
2759 	.minor		= MINOR_START,
2760 	.nr		= ATMEL_MAX_UART,
2761 	.cons		= ATMEL_CONSOLE_DEVICE,
2762 };
2763 
2764 static bool atmel_serial_clk_will_stop(void)
2765 {
2766 #ifdef CONFIG_ARCH_AT91
2767 	return at91_suspend_entering_slow_clock();
2768 #else
2769 	return false;
2770 #endif
2771 }
2772 
2773 static int __maybe_unused atmel_serial_suspend(struct device *dev)
2774 {
2775 	struct uart_port *port = dev_get_drvdata(dev);
2776 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2777 
2778 	if (uart_console(port) && console_suspend_enabled) {
2779 		/* Drain the TX shifter */
2780 		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2781 			 ATMEL_US_TXEMPTY))
2782 			cpu_relax();
2783 	}
2784 
2785 	if (uart_console(port) && !console_suspend_enabled) {
2786 		/* Cache register values as we won't get a full shutdown/startup
2787 		 * cycle
2788 		 */
2789 		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2790 		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2791 		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2792 		atmel_port->cache.rtor = atmel_uart_readl(port,
2793 							  atmel_port->rtor);
2794 		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2795 		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2796 		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2797 	}
2798 
	/* we cannot wake up if we're running on the slow clock */
2800 	atmel_port->may_wakeup = device_may_wakeup(dev);
2801 	if (atmel_serial_clk_will_stop()) {
2802 		unsigned long flags;
2803 
2804 		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2805 		atmel_port->suspended = true;
2806 		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2807 		device_set_wakeup_enable(dev, 0);
2808 	}
2809 
2810 	uart_suspend_port(&atmel_uart, port);
2811 
2812 	return 0;
2813 }
2814 
2815 static int __maybe_unused atmel_serial_resume(struct device *dev)
2816 {
2817 	struct uart_port *port = dev_get_drvdata(dev);
2818 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2819 	unsigned long flags;
2820 
2821 	if (uart_console(port) && !console_suspend_enabled) {
2822 		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2823 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2824 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2825 		atmel_uart_writel(port, atmel_port->rtor,
2826 				  atmel_port->cache.rtor);
2827 		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2828 
2829 		if (atmel_port->fifo_size) {
2830 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2831 					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2832 			atmel_uart_writel(port, ATMEL_US_FMR,
2833 					  atmel_port->cache.fmr);
2834 			atmel_uart_writel(port, ATMEL_US_FIER,
2835 					  atmel_port->cache.fimr);
2836 		}
2837 		atmel_start_rx(port);
2838 	}
2839 
2840 	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
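	/* Replay any interrupt that was latched while the port was suspended */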
2841 	if (atmel_port->pending) {
2842 		atmel_handle_receive(port, atmel_port->pending);
2843 		atmel_handle_status(port, atmel_port->pending,
2844 				    atmel_port->pending_status);
2845 		atmel_handle_transmit(port, atmel_port->pending);
2846 		atmel_port->pending = 0;
2847 	}
2848 	atmel_port->suspended = false;
2849 	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2850 
2851 	uart_resume_port(&atmel_uart, port);
2852 	device_set_wakeup_enable(dev, atmel_port->may_wakeup);
2853 
2854 	return 0;
2855 }
2856 
2857 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2858 				     struct platform_device *pdev)
2859 {
2860 	atmel_port->fifo_size = 0;
2861 	atmel_port->rts_low = 0;
2862 	atmel_port->rts_high = 0;
2863 
2864 	if (of_property_read_u32(pdev->dev.of_node,
2865 				 "atmel,fifo-size",
2866 				 &atmel_port->fifo_size))
2867 		return;
2868 
2869 	if (!atmel_port->fifo_size)
2870 		return;
2871 
2872 	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2873 		atmel_port->fifo_size = 0;
2874 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2875 		return;
2876 	}
2877 
2878 	/*
2879 	 * 0 <= rts_low <= rts_high <= fifo_size
	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
	 * they actually stop sending new data. So we try to set the RTS High
	 * Threshold to a reasonably high value respecting this 16-data
	 * empirical rule when possible.
2885 	 */
2886 	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2887 			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2888 	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2889 			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
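	/* For example, a 32-data FIFO gives rts_high = 16 and rts_low = 12. */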
2890 
2891 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2892 		 atmel_port->fifo_size);
2893 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2894 		atmel_port->rts_high);
2895 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2896 		atmel_port->rts_low);
2897 }
2898 
2899 static int atmel_serial_probe(struct platform_device *pdev)
2900 {
2901 	struct atmel_uart_port *atmel_port;
2902 	struct device_node *np = pdev->dev.parent->of_node;
2903 	void *data;
2904 	int ret;
2905 	bool rs485_enabled;
2906 
2907 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2908 
2909 	/*
2910 	 * In device tree there is no node with "atmel,at91rm9200-usart-serial"
2911 	 * as compatible string. This driver is probed by at91-usart mfd driver
	 * which is just a wrapper over the atmel_serial and spi-at91-usart
	 * drivers. All attributes needed by this driver are found in the
	 * of_node of the parent.
2915 	 */
2916 	pdev->dev.of_node = np;
2917 
2918 	ret = of_alias_get_id(np, "serial");
2919 	if (ret < 0)
		/* port id not found in platform data or device-tree aliases:
		 * auto-enumerate it */
2922 		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2923 
2924 	if (ret >= ATMEL_MAX_UART) {
2925 		ret = -ENODEV;
2926 		goto err;
2927 	}
2928 
2929 	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2930 		/* port already in use */
2931 		ret = -EBUSY;
2932 		goto err;
2933 	}
2934 
2935 	atmel_port = &atmel_ports[ret];
2936 	atmel_port->backup_imr = 0;
2937 	atmel_port->uart.line = ret;
2938 	atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
2939 	atmel_serial_probe_fifos(atmel_port, pdev);
2940 
2941 	atomic_set(&atmel_port->tasklet_shutdown, 0);
2942 	spin_lock_init(&atmel_port->lock_suspended);
2943 
2944 	atmel_port->clk = devm_clk_get(&pdev->dev, "usart");
2945 	if (IS_ERR(atmel_port->clk)) {
2946 		ret = PTR_ERR(atmel_port->clk);
2947 		goto err;
2948 	}
2949 	ret = clk_prepare_enable(atmel_port->clk);
2950 	if (ret)
2951 		goto err;
2952 
2953 	atmel_port->gclk = devm_clk_get_optional(&pdev->dev, "gclk");
2954 	if (IS_ERR(atmel_port->gclk)) {
2955 		ret = PTR_ERR(atmel_port->gclk);
2956 		goto err_clk_disable_unprepare;
2957 	}
2958 
2959 	ret = atmel_init_port(atmel_port, pdev);
2960 	if (ret)
2961 		goto err_clk_disable_unprepare;
2962 
2963 	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2964 	if (IS_ERR(atmel_port->gpios)) {
2965 		ret = PTR_ERR(atmel_port->gpios);
2966 		goto err_clk_disable_unprepare;
2967 	}
2968 
2969 	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
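		/*
		 * PDC reception uses its own DMA buffers; the software ring
		 * buffer is only needed for dmaengine DMA and PIO reception.
		 */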
2970 		ret = -ENOMEM;
2971 		data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
2972 				     sizeof(struct atmel_uart_char),
2973 				     GFP_KERNEL);
2974 		if (!data)
2975 			goto err_clk_disable_unprepare;
2976 		atmel_port->rx_ring.buf = data;
2977 	}
2978 
2979 	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2980 
2981 	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2982 	if (ret)
2983 		goto err_add_port;
2984 
2985 	device_init_wakeup(&pdev->dev, 1);
2986 	platform_set_drvdata(pdev, atmel_port);
2987 
2988 	if (rs485_enabled) {
2989 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2990 				  ATMEL_US_USMODE_NORMAL);
2991 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2992 				  ATMEL_US_RTSEN);
2993 	}
2994 
2995 	/*
 * Get the IP name of the port: USART or UART
2997 	 */
2998 	atmel_get_ip_name(&atmel_port->uart);
2999 
3000 	/*
3001 	 * The peripheral clock can now safely be disabled till the port
3002 	 * is used
3003 	 */
3004 	clk_disable_unprepare(atmel_port->clk);
3005 
3006 	return 0;
3007 
3008 err_add_port:
3009 	kfree(atmel_port->rx_ring.buf);
3010 	atmel_port->rx_ring.buf = NULL;
3011 err_clk_disable_unprepare:
3012 	clk_disable_unprepare(atmel_port->clk);
3013 	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
3014 err:
3015 	return ret;
3016 }
3017 
3018 /*
3019  * Even if the driver is not modular, it makes sense to be able to
3020  * unbind a device: there can be many bound devices, and there are
3021  * situations where dynamic binding and unbinding can be useful.
3022  *
3023  * For example, a connected device can require a specific firmware update
3024  * protocol that needs bitbanging on IO lines, but use the regular serial
3025  * port in the normal case.
3026  */
3027 static int atmel_serial_remove(struct platform_device *pdev)
3028 {
3029 	struct uart_port *port = platform_get_drvdata(pdev);
3030 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
3031 	int ret = 0;
3032 
3033 	tasklet_kill(&atmel_port->tasklet_rx);
3034 	tasklet_kill(&atmel_port->tasklet_tx);
3035 
3036 	device_init_wakeup(&pdev->dev, 0);
3037 
3038 	ret = uart_remove_one_port(&atmel_uart, port);
3039 
3040 	kfree(atmel_port->rx_ring.buf);
3041 
3042 	/* "port" is allocated statically, so we shouldn't free it */
3043 
3044 	clear_bit(port->line, atmel_ports_in_use);
3045 
3046 	pdev->dev.of_node = NULL;
3047 
3048 	return ret;
3049 }
3050 
3051 static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
3052 			 atmel_serial_resume);
3053 
3054 static struct platform_driver atmel_serial_driver = {
3055 	.probe		= atmel_serial_probe,
3056 	.remove		= atmel_serial_remove,
3057 	.driver		= {
3058 		.name			= "atmel_usart_serial",
3059 		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
3060 		.pm			= pm_ptr(&atmel_serial_pm_ops),
3061 	},
3062 };
3063 
3064 static int __init atmel_serial_init(void)
3065 {
3066 	int ret;
3067 
3068 	ret = uart_register_driver(&atmel_uart);
3069 	if (ret)
3070 		return ret;
3071 
3072 	ret = platform_driver_register(&atmel_serial_driver);
3073 	if (ret)
3074 		uart_unregister_driver(&atmel_uart);
3075 
3076 	return ret;
3077 }
3078 device_initcall(atmel_serial_init);
3079