1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Driver for Atmel AT91 Serial ports
4  *  Copyright (C) 2003 Rick Bronson
5  *
6  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8  *
9  *  DMA support added by Chip Coldwell.
10  */
11 #include <linux/circ_buf.h>
12 #include <linux/tty.h>
13 #include <linux/ioport.h>
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/serial.h>
17 #include <linux/clk.h>
18 #include <linux/console.h>
19 #include <linux/sysrq.h>
20 #include <linux/tty_flip.h>
21 #include <linux/platform_device.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/atmel_pdc.h>
27 #include <linux/uaccess.h>
28 #include <linux/platform_data/atmel.h>
29 #include <linux/timer.h>
30 #include <linux/err.h>
31 #include <linux/irq.h>
32 #include <linux/suspend.h>
33 #include <linux/mm.h>
34 #include <linux/io.h>
35 
36 #include <asm/div64.h>
37 #include <asm/ioctls.h>
38 
39 #define PDC_BUFFER_SIZE		512
40 /* Revisit: We should calculate this based on the actual port settings */
41 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
42 
43 /* The minimum number of data the FIFOs should be able to contain */
44 #define ATMEL_MIN_FIFO_SIZE	8
45 /*
46  * These two offsets are subtracted from the RX FIFO size to define the RTS
47  * high and low thresholds
48  */
49 #define ATMEL_RTS_HIGH_OFFSET	16
50 #define ATMEL_RTS_LOW_OFFSET	20
51 
52 #include <linux/serial_core.h>
53 
54 #include "serial_mctrl_gpio.h"
55 #include "atmel_serial.h"
56 
57 static void atmel_start_rx(struct uart_port *port);
58 static void atmel_stop_rx(struct uart_port *port);
59 
60 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
61 
62 /* Use device name ttyAT, major 204 and minors 154-169.  This is necessary if
63  * we need to coexist with the 8250 driver, for example when an external
64  * 16C550 UART is present. */
65 #define SERIAL_ATMEL_MAJOR	204
66 #define MINOR_START		154
67 #define ATMEL_DEVICENAME	"ttyAT"
68 
69 #else
70 
71 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
72  * name, but it is legally reserved for the 8250 driver. */
73 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
74 #define MINOR_START		64
75 #define ATMEL_DEVICENAME	"ttyS"
76 
77 #endif
78 
79 #define ATMEL_ISR_PASS_LIMIT	256
80 
81 struct atmel_dma_buffer {
82 	unsigned char	*buf;
83 	dma_addr_t	dma_addr;
84 	unsigned int	dma_size;
85 	unsigned int	ofs;
86 };
87 
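/*
 * One entry of the software RX ring used in PIO mode: the CSR status bits
 * sampled when the character was received are stored next to the character
 * itself, so errors can be reported to the tty layer later from tasklet
 * context.
 */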
88 struct atmel_uart_char {
89 	u16		status;
90 	u16		ch;
91 };
92 
93 /*
94  * Be careful, the real size of the ring buffer is
95  * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. This means the ring buffer
96  * can contain up to 1024 characters in PIO mode and up to 4096 characters in
97  * DMA mode.
98  */
99 #define ATMEL_SERIAL_RINGSIZE 1024
100 
101 /*
102  * at91: 6 USARTs and one DBGU port (SAM9260)
103  * samx7: 3 USARTs and 5 UARTs
104  */
105 #define ATMEL_MAX_UART		8
106 
107 /*
108  * We wrap our port structure around the generic uart_port.
109  */
110 struct atmel_uart_port {
111 	struct uart_port	uart;		/* uart */
112 	struct clk		*clk;		/* uart clock */
113 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
114 	u32			backup_imr;	/* IMR saved during suspend */
115 	int			break_active;	/* break being received */
116 
117 	bool			use_dma_rx;	/* enable DMA receiver */
118 	bool			use_pdc_rx;	/* enable PDC receiver */
119 	short			pdc_rx_idx;	/* current PDC RX buffer */
120 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
121 
122 	bool			use_dma_tx;     /* enable DMA transmitter */
123 	bool			use_pdc_tx;	/* enable PDC transmitter */
124 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
125 
126 	spinlock_t			lock_tx;	/* port lock */
127 	spinlock_t			lock_rx;	/* port lock */
128 	struct dma_chan			*chan_tx;
129 	struct dma_chan			*chan_rx;
130 	struct dma_async_tx_descriptor	*desc_tx;
131 	struct dma_async_tx_descriptor	*desc_rx;
132 	dma_cookie_t			cookie_tx;
133 	dma_cookie_t			cookie_rx;
134 	struct scatterlist		sg_tx;
135 	struct scatterlist		sg_rx;
136 	struct tasklet_struct	tasklet_rx;
137 	struct tasklet_struct	tasklet_tx;
138 	atomic_t		tasklet_shutdown;
139 	unsigned int		irq_status_prev;
140 	unsigned int		tx_len;
141 
142 	struct circ_buf		rx_ring;
143 
144 	struct mctrl_gpios	*gpios;
145 	u32			backup_mode;	/* MR saved during iso7816 operations */
146 	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
147 	unsigned int		tx_done_mask;
148 	u32			fifo_size;
149 	u32			rts_high;
150 	u32			rts_low;
151 	bool			ms_irq_enabled;
152 	u32			rtor;	/* address of receiver timeout register if it exists */
153 	bool			has_frac_baudrate;
154 	bool			has_hw_timer;
155 	struct timer_list	uart_timer;
156 
157 	bool			tx_stopped;
158 	bool			suspended;
159 	unsigned int		pending;
160 	unsigned int		pending_status;
161 	spinlock_t		lock_suspended;
162 
163 	bool			hd_start_rx;	/* can start RX during half-duplex operation */
164 
165 	/* ISO7816 */
166 	unsigned int		fidi_min;
167 	unsigned int		fidi_max;
168 
169 #ifdef CONFIG_PM
170 	struct {
171 		u32		cr;
172 		u32		mr;
173 		u32		imr;
174 		u32		brgr;
175 		u32		rtor;
176 		u32		ttgr;
177 		u32		fmr;
178 		u32		fimr;
179 	} cache;
180 #endif
181 
182 	int (*prepare_rx)(struct uart_port *port);
183 	int (*prepare_tx)(struct uart_port *port);
184 	void (*schedule_rx)(struct uart_port *port);
185 	void (*schedule_tx)(struct uart_port *port);
186 	void (*release_rx)(struct uart_port *port);
187 	void (*release_tx)(struct uart_port *port);
188 };
189 
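/*
 * Static table of ports and a bitmap tracking which entries are currently
 * in use.
 */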
190 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
191 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
192 
193 #if defined(CONFIG_OF)
194 static const struct of_device_id atmel_serial_dt_ids[] = {
195 	{ .compatible = "atmel,at91rm9200-usart-serial" },
196 	{ /* sentinel */ }
197 };
198 #endif
199 
200 static inline struct atmel_uart_port *
201 to_atmel_uart_port(struct uart_port *uart)
202 {
203 	return container_of(uart, struct atmel_uart_port, uart);
204 }
205 
206 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
207 {
208 	return __raw_readl(port->membase + reg);
209 }
210 
211 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
212 {
213 	__raw_writel(value, port->membase + reg);
214 }
215 
216 static inline u8 atmel_uart_read_char(struct uart_port *port)
217 {
218 	return __raw_readb(port->membase + ATMEL_US_RHR);
219 }
220 
221 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
222 {
223 	__raw_writeb(value, port->membase + ATMEL_US_THR);
224 }
225 
226 static inline int atmel_uart_is_half_duplex(struct uart_port *port)
227 {
228 	return ((port->rs485.flags & SER_RS485_ENABLED) &&
229 		!(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
230 		(port->iso7816.flags & SER_ISO7816_ENABLED);
231 }
232 
233 #ifdef CONFIG_SERIAL_ATMEL_PDC
234 static bool atmel_use_pdc_rx(struct uart_port *port)
235 {
236 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
237 
238 	return atmel_port->use_pdc_rx;
239 }
240 
241 static bool atmel_use_pdc_tx(struct uart_port *port)
242 {
243 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
244 
245 	return atmel_port->use_pdc_tx;
246 }
247 #else
248 static bool atmel_use_pdc_rx(struct uart_port *port)
249 {
250 	return false;
251 }
252 
253 static bool atmel_use_pdc_tx(struct uart_port *port)
254 {
255 	return false;
256 }
257 #endif
258 
259 static bool atmel_use_dma_tx(struct uart_port *port)
260 {
261 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
262 
263 	return atmel_port->use_dma_tx;
264 }
265 
266 static bool atmel_use_dma_rx(struct uart_port *port)
267 {
268 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
269 
270 	return atmel_port->use_dma_rx;
271 }
272 
273 static bool atmel_use_fifo(struct uart_port *port)
274 {
275 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
276 
277 	return atmel_port->fifo_size;
278 }
279 
280 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
281 				   struct tasklet_struct *t)
282 {
283 	if (!atomic_read(&atmel_port->tasklet_shutdown))
284 		tasklet_schedule(t);
285 }
286 
287 /* Enable or disable the rs485 support */
288 static int atmel_config_rs485(struct uart_port *port,
289 			      struct serial_rs485 *rs485conf)
290 {
291 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
292 	unsigned int mode;
293 
294 	/* Disable interrupts */
295 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
296 
297 	mode = atmel_uart_readl(port, ATMEL_US_MR);
298 
299 	/* Resetting serial mode to RS232 (0x0) */
300 	mode &= ~ATMEL_US_USMODE;
301 
302 	port->rs485 = *rs485conf;
303 
304 	if (rs485conf->flags & SER_RS485_ENABLED) {
305 		dev_dbg(port->dev, "Setting UART to RS485\n");
306 		if (port->rs485.flags & SER_RS485_RX_DURING_TX)
307 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
308 		else
309 			atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
310 
311 		atmel_uart_writel(port, ATMEL_US_TTGR,
312 				  rs485conf->delay_rts_after_send);
313 		mode |= ATMEL_US_USMODE_RS485;
314 	} else {
315 		dev_dbg(port->dev, "Setting UART to RS232\n");
316 		if (atmel_use_pdc_tx(port))
317 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
318 				ATMEL_US_TXBUFE;
319 		else
320 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
321 	}
322 	atmel_uart_writel(port, ATMEL_US_MR, mode);
323 
324 	/* Enable interrupts */
325 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
326 
327 	return 0;
328 }
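/*
 * Illustrative sketch (not part of the driver): userspace typically enables
 * RS485 on such a port through the generic serial_rs485 ioctl, which the
 * serial core routes to the rs485_config() callback above, e.g.:
 *
 *	struct serial_rs485 rs485 = { 0 };
 *
 *	rs485.flags = SER_RS485_ENABLED;
 *	rs485.delay_rts_after_send = 1;
 *	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
 *		perror("TIOCSRS485");
 */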
329 
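/*
 * Compute the BRGR clock divider (CD) for ISO7816 mode: the USART input
 * clock rate divided by the requested ISO7816 clock frequency.
 */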
330 static unsigned int atmel_calc_cd(struct uart_port *port,
331 				  struct serial_iso7816 *iso7816conf)
332 {
333 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
334 	unsigned int cd;
335 	u64 mck_rate;
336 
337 	mck_rate = (u64)clk_get_rate(atmel_port->clk);
338 	do_div(mck_rate, iso7816conf->clk);
339 	cd = mck_rate;
340 	return cd;
341 }
342 
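/*
 * Compute the Fi/Di ratio for the FIDI register from the ISO7816 clock rate
 * conversion integer (Fi) and baud rate adjustment integer (Di).
 */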
343 static unsigned int atmel_calc_fidi(struct uart_port *port,
344 				    struct serial_iso7816 *iso7816conf)
345 {
346 	u64 fidi = 0;
347 
348 	if (iso7816conf->sc_fi && iso7816conf->sc_di) {
349 		fidi = (u64)iso7816conf->sc_fi;
350 		do_div(fidi, iso7816conf->sc_di);
351 	}
352 	return (u32)fidi;
353 }
354 
355 /* Enable or disable the iso7816 support */
356 /* Called with interrupts disabled */
357 static int atmel_config_iso7816(struct uart_port *port,
358 				struct serial_iso7816 *iso7816conf)
359 {
360 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
361 	unsigned int mode;
362 	unsigned int cd, fidi;
363 	int ret = 0;
364 
365 	/* Disable interrupts */
366 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
367 
368 	mode = atmel_uart_readl(port, ATMEL_US_MR);
369 
370 	if (iso7816conf->flags & SER_ISO7816_ENABLED) {
371 		mode &= ~ATMEL_US_USMODE;
372 
373 		if (iso7816conf->tg > 255) {
374 			dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n");
375 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
376 			ret = -EINVAL;
377 			goto err_out;
378 		}
379 
380 		if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
381 		    == SER_ISO7816_T(0)) {
382 			mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK;
383 		} else if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
384 			   == SER_ISO7816_T(1)) {
385 			mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK;
386 		} else {
387 			dev_err(port->dev, "ISO7816: Type not supported\n");
388 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
389 			ret = -EINVAL;
390 			goto err_out;
391 		}
392 
393 		mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR);
394 
395 		/* select the MCK clock, and enable the clock output */
396 		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
397 		/* set parity for normal/inverse mode + max iterations */
398 		mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3);
399 
400 		cd = atmel_calc_cd(port, iso7816conf);
401 		fidi = atmel_calc_fidi(port, iso7816conf);
402 		if (fidi == 0) {
403 			dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n");
404 		} else if (fidi < atmel_port->fidi_min
405 			   || fidi > atmel_port->fidi_max) {
406 			dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi);
407 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
408 			ret = -EINVAL;
409 			goto err_out;
410 		}
411 
412 		if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) {
413 			/* port not yet in iso7816 mode: store configuration */
414 			atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR);
415 			atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
416 		}
417 
418 		atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg);
419 		atmel_uart_writel(port, ATMEL_US_BRGR, cd);
420 		atmel_uart_writel(port, ATMEL_US_FIDI, fidi);
421 
422 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN);
423 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION;
424 	} else {
425 		dev_dbg(port->dev, "Setting UART back to RS232\n");
426 		/* back to last RS232 settings */
427 		mode = atmel_port->backup_mode;
428 		memset(iso7816conf, 0, sizeof(struct serial_iso7816));
429 		atmel_uart_writel(port, ATMEL_US_TTGR, 0);
430 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr);
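		/* 0x174 == 372, the ISO7816 default Fi/Di ratio */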
431 		atmel_uart_writel(port, ATMEL_US_FIDI, 0x174);
432 
433 		if (atmel_use_pdc_tx(port))
434 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
435 						   ATMEL_US_TXBUFE;
436 		else
437 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
438 	}
439 
440 	port->iso7816 = *iso7816conf;
441 
442 	atmel_uart_writel(port, ATMEL_US_MR, mode);
443 
444 err_out:
445 	/* Enable interrupts */
446 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
447 
448 	return ret;
449 }
450 
451 /*
452  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
453  */
454 static u_int atmel_tx_empty(struct uart_port *port)
455 {
456 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
457 
458 	if (atmel_port->tx_stopped)
459 		return TIOCSER_TEMT;
460 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
461 		TIOCSER_TEMT :
462 		0;
463 }
464 
465 /*
466  * Set state of the modem control output lines
467  */
468 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
469 {
470 	unsigned int control = 0;
471 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
472 	unsigned int rts_paused, rts_ready;
473 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
474 
475 	/* override mode to RS485 if needed, otherwise keep the current mode */
476 	if (port->rs485.flags & SER_RS485_ENABLED) {
477 		atmel_uart_writel(port, ATMEL_US_TTGR,
478 				  port->rs485.delay_rts_after_send);
479 		mode &= ~ATMEL_US_USMODE;
480 		mode |= ATMEL_US_USMODE_RS485;
481 	}
482 
483 	/* set the RTS line state according to the mode */
484 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
485 		/* force RTS line to high level */
486 		rts_paused = ATMEL_US_RTSEN;
487 
488 		/* give the control of the RTS line back to the hardware */
489 		rts_ready = ATMEL_US_RTSDIS;
490 	} else {
491 		/* force RTS line to high level */
492 		rts_paused = ATMEL_US_RTSDIS;
493 
494 		/* force RTS line to low level */
495 		rts_ready = ATMEL_US_RTSEN;
496 	}
497 
498 	if (mctrl & TIOCM_RTS)
499 		control |= rts_ready;
500 	else
501 		control |= rts_paused;
502 
503 	if (mctrl & TIOCM_DTR)
504 		control |= ATMEL_US_DTREN;
505 	else
506 		control |= ATMEL_US_DTRDIS;
507 
508 	atmel_uart_writel(port, ATMEL_US_CR, control);
509 
510 	mctrl_gpio_set(atmel_port->gpios, mctrl);
511 
512 	/* Local loopback mode? */
513 	mode &= ~ATMEL_US_CHMODE;
514 	if (mctrl & TIOCM_LOOP)
515 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
516 	else
517 		mode |= ATMEL_US_CHMODE_NORMAL;
518 
519 	atmel_uart_writel(port, ATMEL_US_MR, mode);
520 }
521 
522 /*
523  * Get state of the modem control input lines
524  */
525 static u_int atmel_get_mctrl(struct uart_port *port)
526 {
527 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
528 	unsigned int ret = 0, status;
529 
530 	status = atmel_uart_readl(port, ATMEL_US_CSR);
531 
532 	/*
533 	 * The control signals are active low.
534 	 */
535 	if (!(status & ATMEL_US_DCD))
536 		ret |= TIOCM_CD;
537 	if (!(status & ATMEL_US_CTS))
538 		ret |= TIOCM_CTS;
539 	if (!(status & ATMEL_US_DSR))
540 		ret |= TIOCM_DSR;
541 	if (!(status & ATMEL_US_RI))
542 		ret |= TIOCM_RI;
543 
544 	return mctrl_gpio_get(atmel_port->gpios, &ret);
545 }
546 
547 /*
548  * Stop transmitting.
549  */
550 static void atmel_stop_tx(struct uart_port *port)
551 {
552 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
553 
554 	if (atmel_use_pdc_tx(port)) {
555 		/* disable PDC transmit */
556 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
557 	}
558 
559 	/*
560 	 * Disable the transmitter.
561 	 * This is mandatory when DMA is used: otherwise the pending DMA
562 	 * buffer would be transmitted in full anyway.
563 	 */
564 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
565 	atmel_port->tx_stopped = true;
566 
567 	/* Disable interrupts */
568 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
569 
570 	if (atmel_uart_is_half_duplex(port))
571 		if (!atomic_read(&atmel_port->tasklet_shutdown))
572 			atmel_start_rx(port);
573 
574 }
575 
576 /*
577  * Start transmitting.
578  */
579 static void atmel_start_tx(struct uart_port *port)
580 {
581 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
582 
583 	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
584 				       & ATMEL_PDC_TXTEN))
585 		/* The transmitter is already running.  Yes, we
586 		   really need this.*/
587 		return;
588 
589 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
590 		if (atmel_uart_is_half_duplex(port))
591 			atmel_stop_rx(port);
592 
593 	if (atmel_use_pdc_tx(port))
594 		/* re-enable PDC transmit */
595 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
596 
597 	/* Enable interrupts */
598 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
599 
600 	/* re-enable the transmitter */
601 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
602 	atmel_port->tx_stopped = false;
603 }
604 
605 /*
606  * start receiving - port is in process of being opened.
607  */
608 static void atmel_start_rx(struct uart_port *port)
609 {
610 	/* reset status and receiver */
611 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
612 
613 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
614 
615 	if (atmel_use_pdc_rx(port)) {
616 		/* enable PDC controller */
617 		atmel_uart_writel(port, ATMEL_US_IER,
618 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
619 				  port->read_status_mask);
620 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
621 	} else {
622 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
623 	}
624 }
625 
626 /*
627  * Stop receiving - port is in process of being closed.
628  */
629 static void atmel_stop_rx(struct uart_port *port)
630 {
631 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
632 
633 	if (atmel_use_pdc_rx(port)) {
634 		/* disable PDC receive */
635 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
636 		atmel_uart_writel(port, ATMEL_US_IDR,
637 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
638 				  port->read_status_mask);
639 	} else {
640 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
641 	}
642 }
643 
644 /*
645  * Enable modem status interrupts
646  */
647 static void atmel_enable_ms(struct uart_port *port)
648 {
649 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
650 	uint32_t ier = 0;
651 
652 	/*
653 	 * Interrupt should not be enabled twice
654 	 */
655 	if (atmel_port->ms_irq_enabled)
656 		return;
657 
658 	atmel_port->ms_irq_enabled = true;
659 
660 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
661 		ier |= ATMEL_US_CTSIC;
662 
663 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
664 		ier |= ATMEL_US_DSRIC;
665 
666 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
667 		ier |= ATMEL_US_RIIC;
668 
669 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
670 		ier |= ATMEL_US_DCDIC;
671 
672 	atmel_uart_writel(port, ATMEL_US_IER, ier);
673 
674 	mctrl_gpio_enable_ms(atmel_port->gpios);
675 }
676 
677 /*
678  * Disable modem status interrupts
679  */
680 static void atmel_disable_ms(struct uart_port *port)
681 {
682 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
683 	uint32_t idr = 0;
684 
685 	/*
686 	 * Interrupt should not be disabled twice
687 	 */
688 	if (!atmel_port->ms_irq_enabled)
689 		return;
690 
691 	atmel_port->ms_irq_enabled = false;
692 
693 	mctrl_gpio_disable_ms(atmel_port->gpios);
694 
695 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
696 		idr |= ATMEL_US_CTSIC;
697 
698 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
699 		idr |= ATMEL_US_DSRIC;
700 
701 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
702 		idr |= ATMEL_US_RIIC;
703 
704 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
705 		idr |= ATMEL_US_DCDIC;
706 
707 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
708 }
709 
710 /*
711  * Control the transmission of a break signal
712  */
713 static void atmel_break_ctl(struct uart_port *port, int break_state)
714 {
715 	if (break_state != 0)
716 		/* start break */
717 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
718 	else
719 		/* stop break */
720 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
721 }
722 
723 /*
724  * Stores the incoming character in the ring buffer
725  */
726 static void
727 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
728 		     unsigned int ch)
729 {
730 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
731 	struct circ_buf *ring = &atmel_port->rx_ring;
732 	struct atmel_uart_char *c;
733 
734 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
735 		/* Buffer overflow, ignore char */
736 		return;
737 
738 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
739 	c->status	= status;
740 	c->ch		= ch;
741 
742 	/* Make sure the character is stored before we update head. */
743 	smp_wmb();
744 
745 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
746 }
747 
748 /*
749  * Deal with parity, framing and overrun errors.
750  */
751 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
752 {
753 	/* clear error */
754 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
755 
756 	if (status & ATMEL_US_RXBRK) {
757 		/* ignore side-effect */
758 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
759 		port->icount.brk++;
760 	}
761 	if (status & ATMEL_US_PARE)
762 		port->icount.parity++;
763 	if (status & ATMEL_US_FRAME)
764 		port->icount.frame++;
765 	if (status & ATMEL_US_OVRE)
766 		port->icount.overrun++;
767 }
768 
769 /*
770  * Characters received (called from interrupt handler)
771  */
772 static void atmel_rx_chars(struct uart_port *port)
773 {
774 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
775 	unsigned int status, ch;
776 
777 	status = atmel_uart_readl(port, ATMEL_US_CSR);
778 	while (status & ATMEL_US_RXRDY) {
779 		ch = atmel_uart_read_char(port);
780 
781 		/*
782 		 * note that the error handling code is
783 		 * out of the main execution path
784 		 */
785 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
786 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
787 			     || atmel_port->break_active)) {
788 
789 			/* clear error */
790 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
791 
792 			if (status & ATMEL_US_RXBRK
793 			    && !atmel_port->break_active) {
794 				atmel_port->break_active = 1;
795 				atmel_uart_writel(port, ATMEL_US_IER,
796 						  ATMEL_US_RXBRK);
797 			} else {
798 				/*
799 				 * This is either the end-of-break
800 				 * condition or we've received at
801 				 * least one character without RXBRK
802 				 * being set. In both cases, the next
803 				 * RXBRK will indicate start-of-break.
804 				 */
805 				atmel_uart_writel(port, ATMEL_US_IDR,
806 						  ATMEL_US_RXBRK);
807 				status &= ~ATMEL_US_RXBRK;
808 				atmel_port->break_active = 0;
809 			}
810 		}
811 
812 		atmel_buffer_rx_char(port, status, ch);
813 		status = atmel_uart_readl(port, ATMEL_US_CSR);
814 	}
815 
816 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
817 }
818 
819 /*
820  * Transmit characters (called from tasklet with TXRDY interrupt
821  * disabled)
822  */
823 static void atmel_tx_chars(struct uart_port *port)
824 {
825 	struct circ_buf *xmit = &port->state->xmit;
826 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
827 
828 	if (port->x_char &&
829 	    (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
830 		atmel_uart_write_char(port, port->x_char);
831 		port->icount.tx++;
832 		port->x_char = 0;
833 	}
834 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
835 		return;
836 
837 	while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
838 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
839 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
840 		port->icount.tx++;
841 		if (uart_circ_empty(xmit))
842 			break;
843 	}
844 
845 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
846 		uart_write_wakeup(port);
847 
848 	if (!uart_circ_empty(xmit)) {
849 		/* we still have characters to transmit, so we should continue
850 		 * transmitting them when TX is ready, regardless of
851 		 * mode or duplexity
852 		 */
853 		atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
854 
855 		/* Enable interrupts */
856 		atmel_uart_writel(port, ATMEL_US_IER,
857 				  atmel_port->tx_done_mask);
858 	} else {
859 		if (atmel_uart_is_half_duplex(port))
860 			atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
861 	}
862 }
863 
864 static void atmel_complete_tx_dma(void *arg)
865 {
866 	struct atmel_uart_port *atmel_port = arg;
867 	struct uart_port *port = &atmel_port->uart;
868 	struct circ_buf *xmit = &port->state->xmit;
869 	struct dma_chan *chan = atmel_port->chan_tx;
870 	unsigned long flags;
871 
872 	spin_lock_irqsave(&port->lock, flags);
873 
874 	if (chan)
875 		dmaengine_terminate_all(chan);
876 	xmit->tail += atmel_port->tx_len;
877 	xmit->tail &= UART_XMIT_SIZE - 1;
878 
879 	port->icount.tx += atmel_port->tx_len;
880 
881 	spin_lock_irq(&atmel_port->lock_tx);
882 	async_tx_ack(atmel_port->desc_tx);
883 	atmel_port->cookie_tx = -EINVAL;
884 	atmel_port->desc_tx = NULL;
885 	spin_unlock_irq(&atmel_port->lock_tx);
886 
887 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
888 		uart_write_wakeup(port);
889 
890 	/*
891 	 * xmit is a circular buffer so, if we have just sent data from
892 	 * xmit->tail to the end of xmit->buf, now we have to transmit the
893 	 * remaining data from the beginning of xmit->buf to xmit->head.
894 	 */
895 	if (!uart_circ_empty(xmit))
896 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
897 	else if (atmel_uart_is_half_duplex(port)) {
898 		/*
899 		 * DMA done, re-enable TXEMPTY and signal that we can stop
900 		 * TX and start RX for RS485
901 		 */
902 		atmel_port->hd_start_rx = true;
903 		atmel_uart_writel(port, ATMEL_US_IER,
904 				  atmel_port->tx_done_mask);
905 	}
906 
907 	spin_unlock_irqrestore(&port->lock, flags);
908 }
909 
910 static void atmel_release_tx_dma(struct uart_port *port)
911 {
912 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
913 	struct dma_chan *chan = atmel_port->chan_tx;
914 
915 	if (chan) {
916 		dmaengine_terminate_all(chan);
917 		dma_release_channel(chan);
918 		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
919 				DMA_TO_DEVICE);
920 	}
921 
922 	atmel_port->desc_tx = NULL;
923 	atmel_port->chan_tx = NULL;
924 	atmel_port->cookie_tx = -EINVAL;
925 }
926 
927 /*
928  * Called from the tasklet with the TXRDY interrupt disabled.
929  */
930 static void atmel_tx_dma(struct uart_port *port)
931 {
932 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
933 	struct circ_buf *xmit = &port->state->xmit;
934 	struct dma_chan *chan = atmel_port->chan_tx;
935 	struct dma_async_tx_descriptor *desc;
936 	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
937 	unsigned int tx_len, part1_len, part2_len, sg_len;
938 	dma_addr_t phys_addr;
939 
940 	/* Make sure we have an idle channel */
941 	if (atmel_port->desc_tx != NULL)
942 		return;
943 
944 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
945 		/*
946 		 * DMA is idle now.
947 		 * Port xmit buffer is already mapped,
948 		 * and it is one page... Just adjust
949 		 * offsets and lengths. Since it is a circular buffer,
950 		 * we have to transmit till the end, and then the rest.
951 		 * Take the port lock to get a
952 		 * consistent xmit buffer state.
953 		 */
954 		tx_len = CIRC_CNT_TO_END(xmit->head,
955 					 xmit->tail,
956 					 UART_XMIT_SIZE);
957 
958 		if (atmel_port->fifo_size) {
959 			/* multi data mode */
960 			part1_len = (tx_len & ~0x3); /* DWORD access */
961 			part2_len = (tx_len & 0x3); /* BYTE access */
962 		} else {
963 			/* single data (legacy) mode */
964 			part1_len = 0;
965 			part2_len = tx_len; /* BYTE access only */
966 		}
967 
968 		sg_init_table(sgl, 2);
969 		sg_len = 0;
970 		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
971 		if (part1_len) {
972 			sg = &sgl[sg_len++];
973 			sg_dma_address(sg) = phys_addr;
974 			sg_dma_len(sg) = part1_len;
975 
976 			phys_addr += part1_len;
977 		}
978 
979 		if (part2_len) {
980 			sg = &sgl[sg_len++];
981 			sg_dma_address(sg) = phys_addr;
982 			sg_dma_len(sg) = part2_len;
983 		}
984 
985 		/*
986 		 * save tx_len so atmel_complete_tx_dma() will increase
987 		 * xmit->tail correctly
988 		 */
989 		atmel_port->tx_len = tx_len;
990 
991 		desc = dmaengine_prep_slave_sg(chan,
992 					       sgl,
993 					       sg_len,
994 					       DMA_MEM_TO_DEV,
995 					       DMA_PREP_INTERRUPT |
996 					       DMA_CTRL_ACK);
997 		if (!desc) {
998 			dev_err(port->dev, "Failed to send via dma!\n");
999 			return;
1000 		}
1001 
1002 		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
1003 
1004 		atmel_port->desc_tx = desc;
1005 		desc->callback = atmel_complete_tx_dma;
1006 		desc->callback_param = atmel_port;
1007 		atmel_port->cookie_tx = dmaengine_submit(desc);
1008 		if (dma_submit_error(atmel_port->cookie_tx)) {
1009 			dev_err(port->dev, "dma_submit_error %d\n",
1010 				atmel_port->cookie_tx);
1011 			return;
1012 		}
1013 
1014 		dma_async_issue_pending(chan);
1015 	}
1016 
1017 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1018 		uart_write_wakeup(port);
1019 }
1020 
1021 static int atmel_prepare_tx_dma(struct uart_port *port)
1022 {
1023 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1024 	struct device *mfd_dev = port->dev->parent;
1025 	dma_cap_mask_t		mask;
1026 	struct dma_slave_config config;
1027 	int ret, nent;
1028 
1029 	dma_cap_zero(mask);
1030 	dma_cap_set(DMA_SLAVE, mask);
1031 
1032 	atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
1033 	if (atmel_port->chan_tx == NULL)
1034 		goto chan_err;
1035 	dev_info(port->dev, "using %s for tx DMA transfers\n",
1036 		dma_chan_name(atmel_port->chan_tx));
1037 
1038 	spin_lock_init(&atmel_port->lock_tx);
1039 	sg_init_table(&atmel_port->sg_tx, 1);
1040 	/* UART circular tx buffer is an aligned page. */
1041 	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
1042 	sg_set_page(&atmel_port->sg_tx,
1043 			virt_to_page(port->state->xmit.buf),
1044 			UART_XMIT_SIZE,
1045 			offset_in_page(port->state->xmit.buf));
1046 	nent = dma_map_sg(port->dev,
1047 				&atmel_port->sg_tx,
1048 				1,
1049 				DMA_TO_DEVICE);
1050 
1051 	if (!nent) {
1052 		dev_dbg(port->dev, "need to release resource of dma\n");
1053 		goto chan_err;
1054 	} else {
1055 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1056 			sg_dma_len(&atmel_port->sg_tx),
1057 			port->state->xmit.buf,
1058 			&sg_dma_address(&atmel_port->sg_tx));
1059 	}
1060 
1061 	/* Configure the slave DMA */
1062 	memset(&config, 0, sizeof(config));
1063 	config.direction = DMA_MEM_TO_DEV;
1064 	config.dst_addr_width = (atmel_port->fifo_size) ?
1065 				DMA_SLAVE_BUSWIDTH_4_BYTES :
1066 				DMA_SLAVE_BUSWIDTH_1_BYTE;
1067 	config.dst_addr = port->mapbase + ATMEL_US_THR;
1068 	config.dst_maxburst = 1;
1069 
1070 	ret = dmaengine_slave_config(atmel_port->chan_tx,
1071 				     &config);
1072 	if (ret) {
1073 		dev_err(port->dev, "DMA tx slave configuration failed\n");
1074 		goto chan_err;
1075 	}
1076 
1077 	return 0;
1078 
1079 chan_err:
1080 	dev_err(port->dev, "TX channel not available, switch to pio\n");
1081 	atmel_port->use_dma_tx = false;
1082 	if (atmel_port->chan_tx)
1083 		atmel_release_tx_dma(port);
1084 	return -EINVAL;
1085 }
1086 
1087 static void atmel_complete_rx_dma(void *arg)
1088 {
1089 	struct uart_port *port = arg;
1090 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1091 
1092 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1093 }
1094 
1095 static void atmel_release_rx_dma(struct uart_port *port)
1096 {
1097 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1098 	struct dma_chan *chan = atmel_port->chan_rx;
1099 
1100 	if (chan) {
1101 		dmaengine_terminate_all(chan);
1102 		dma_release_channel(chan);
1103 		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
1104 				DMA_FROM_DEVICE);
1105 	}
1106 
1107 	atmel_port->desc_rx = NULL;
1108 	atmel_port->chan_rx = NULL;
1109 	atmel_port->cookie_rx = -EINVAL;
1110 }
1111 
1112 static void atmel_rx_from_dma(struct uart_port *port)
1113 {
1114 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1115 	struct tty_port *tport = &port->state->port;
1116 	struct circ_buf *ring = &atmel_port->rx_ring;
1117 	struct dma_chan *chan = atmel_port->chan_rx;
1118 	struct dma_tx_state state;
1119 	enum dma_status dmastat;
1120 	size_t count;
1121 
1122 
1123 	/* Reset the UART timeout early so that we don't miss one */
1124 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1125 	dmastat = dmaengine_tx_status(chan,
1126 				atmel_port->cookie_rx,
1127 				&state);
1128 	/* Restart a new tasklet if DMA status is error */
1129 	if (dmastat == DMA_ERROR) {
1130 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1131 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1132 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1133 		return;
1134 	}
1135 
1136 	/* CPU claims ownership of RX DMA buffer */
1137 	dma_sync_sg_for_cpu(port->dev,
1138 			    &atmel_port->sg_rx,
1139 			    1,
1140 			    DMA_FROM_DEVICE);
1141 
1142 	/*
1143 	 * ring->head points to the end of data already written by the DMA.
1144 	 * ring->tail points to the beginning of data to be read by the
1145 	 * framework.
1146 	 * The current transfer size should not be larger than the dma buffer
1147 	 * length.
1148 	 */
1149 	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1150 	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1151 	/*
1152 	 * At this point ring->head may point to the first byte right after the
1153 	 * last byte of the dma buffer:
1154 	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1155 	 *
1156 	 * However ring->tail must always point inside the dma buffer:
1157 	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1158 	 *
1159 	 * Since we use a ring buffer, we have to handle the case
1160 	 * where head is lower than tail. In such a case, we first read from
1161 	 * tail to the end of the buffer then reset tail.
1162 	 */
1163 	if (ring->head < ring->tail) {
1164 		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1165 
1166 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1167 		ring->tail = 0;
1168 		port->icount.rx += count;
1169 	}
1170 
1171 	/* Finally we read data from tail to head */
1172 	if (ring->tail < ring->head) {
1173 		count = ring->head - ring->tail;
1174 
1175 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1176 		/* Wrap ring->head if needed */
1177 		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1178 			ring->head = 0;
1179 		ring->tail = ring->head;
1180 		port->icount.rx += count;
1181 	}
1182 
1183 	/* USART retrieves ownership of RX DMA buffer */
1184 	dma_sync_sg_for_device(port->dev,
1185 			       &atmel_port->sg_rx,
1186 			       1,
1187 			       DMA_FROM_DEVICE);
1188 
1189 	tty_flip_buffer_push(tport);
1190 
1191 	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1192 }
1193 
1194 static int atmel_prepare_rx_dma(struct uart_port *port)
1195 {
1196 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1197 	struct device *mfd_dev = port->dev->parent;
1198 	struct dma_async_tx_descriptor *desc;
1199 	dma_cap_mask_t		mask;
1200 	struct dma_slave_config config;
1201 	struct circ_buf		*ring;
1202 	int ret, nent;
1203 
1204 	ring = &atmel_port->rx_ring;
1205 
1206 	dma_cap_zero(mask);
1207 	dma_cap_set(DMA_CYCLIC, mask);
1208 
1209 	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
1210 	if (atmel_port->chan_rx == NULL)
1211 		goto chan_err;
1212 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1213 		dma_chan_name(atmel_port->chan_rx));
1214 
1215 	spin_lock_init(&atmel_port->lock_rx);
1216 	sg_init_table(&atmel_port->sg_rx, 1);
1217 	/* UART circular rx buffer is an aligned page. */
1218 	BUG_ON(!PAGE_ALIGNED(ring->buf));
1219 	sg_set_page(&atmel_port->sg_rx,
1220 		    virt_to_page(ring->buf),
1221 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1222 		    offset_in_page(ring->buf));
1223 	nent = dma_map_sg(port->dev,
1224 			  &atmel_port->sg_rx,
1225 			  1,
1226 			  DMA_FROM_DEVICE);
1227 
1228 	if (!nent) {
1229 		dev_dbg(port->dev, "need to release resource of dma\n");
1230 		goto chan_err;
1231 	} else {
1232 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1233 			sg_dma_len(&atmel_port->sg_rx),
1234 			ring->buf,
1235 			&sg_dma_address(&atmel_port->sg_rx));
1236 	}
1237 
1238 	/* Configure the slave DMA */
1239 	memset(&config, 0, sizeof(config));
1240 	config.direction = DMA_DEV_TO_MEM;
1241 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1242 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1243 	config.src_maxburst = 1;
1244 
1245 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1246 				     &config);
1247 	if (ret) {
1248 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1249 		goto chan_err;
1250 	}
1251 	/*
1252 	 * Prepare a cyclic DMA transfer with two periods, each covering
1253 	 * half of the ring buffer
1254 	 */
1255 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1256 					 sg_dma_address(&atmel_port->sg_rx),
1257 					 sg_dma_len(&atmel_port->sg_rx),
1258 					 sg_dma_len(&atmel_port->sg_rx)/2,
1259 					 DMA_DEV_TO_MEM,
1260 					 DMA_PREP_INTERRUPT);
1261 	if (!desc) {
1262 		dev_err(port->dev, "Preparing DMA cyclic failed\n");
1263 		goto chan_err;
1264 	}
1265 	desc->callback = atmel_complete_rx_dma;
1266 	desc->callback_param = port;
1267 	atmel_port->desc_rx = desc;
1268 	atmel_port->cookie_rx = dmaengine_submit(desc);
1269 	if (dma_submit_error(atmel_port->cookie_rx)) {
1270 		dev_err(port->dev, "dma_submit_error %d\n",
1271 			atmel_port->cookie_rx);
1272 		goto chan_err;
1273 	}
1274 
1275 	dma_async_issue_pending(atmel_port->chan_rx);
1276 
1277 	return 0;
1278 
1279 chan_err:
1280 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1281 	atmel_port->use_dma_rx = false;
1282 	if (atmel_port->chan_rx)
1283 		atmel_release_rx_dma(port);
1284 	return -EINVAL;
1285 }
1286 
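/*
 * Periodic timer callback: kick the RX tasklet and re-arm the timer unless
 * the port is shutting down. Used as a software substitute for the receiver
 * timeout on IPs without a hardware timer.
 */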
1287 static void atmel_uart_timer_callback(struct timer_list *t)
1288 {
1289 	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1290 							uart_timer);
1291 	struct uart_port *port = &atmel_port->uart;
1292 
1293 	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1294 		tasklet_schedule(&atmel_port->tasklet_rx);
1295 		mod_timer(&atmel_port->uart_timer,
1296 			  jiffies + uart_poll_timeout(port));
1297 	}
1298 }
1299 
1300 /*
1301  * receive interrupt handler.
1302  */
1303 static void
1304 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1305 {
1306 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1307 
1308 	if (atmel_use_pdc_rx(port)) {
1309 		/*
1310 		 * PDC receive. Just schedule the tasklet and let it
1311 		 * figure out the details.
1312 		 *
1313 		 * TODO: We're not handling error flags correctly at
1314 		 * the moment.
1315 		 */
1316 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1317 			atmel_uart_writel(port, ATMEL_US_IDR,
1318 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1319 			atmel_tasklet_schedule(atmel_port,
1320 					       &atmel_port->tasklet_rx);
1321 		}
1322 
1323 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1324 				ATMEL_US_FRAME | ATMEL_US_PARE))
1325 			atmel_pdc_rxerr(port, pending);
1326 	}
1327 
1328 	if (atmel_use_dma_rx(port)) {
1329 		if (pending & ATMEL_US_TIMEOUT) {
1330 			atmel_uart_writel(port, ATMEL_US_IDR,
1331 					  ATMEL_US_TIMEOUT);
1332 			atmel_tasklet_schedule(atmel_port,
1333 					       &atmel_port->tasklet_rx);
1334 		}
1335 	}
1336 
1337 	/* Interrupt receive */
1338 	if (pending & ATMEL_US_RXRDY)
1339 		atmel_rx_chars(port);
1340 	else if (pending & ATMEL_US_RXBRK) {
1341 		/*
1342 		 * End of break detected. If it came along with a
1343 		 * character, atmel_rx_chars will handle it.
1344 		 */
1345 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1346 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1347 		atmel_port->break_active = 0;
1348 	}
1349 }
1350 
1351 /*
1352  * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1353  */
1354 static void
1355 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1356 {
1357 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1358 
1359 	if (pending & atmel_port->tx_done_mask) {
1360 		atmel_uart_writel(port, ATMEL_US_IDR,
1361 				  atmel_port->tx_done_mask);
1362 
1363 		/* Start RX if flag was set and FIFO is empty */
1364 		if (atmel_port->hd_start_rx) {
1365 			if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1366 					& ATMEL_US_TXEMPTY))
1367 				dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1368 
1369 			atmel_port->hd_start_rx = false;
1370 			atmel_start_rx(port);
1371 		}
1372 
1373 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1374 	}
1375 }
1376 
1377 /*
1378  * status flags interrupt handler.
1379  */
1380 static void
1381 atmel_handle_status(struct uart_port *port, unsigned int pending,
1382 		    unsigned int status)
1383 {
1384 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1385 	unsigned int status_change;
1386 
1387 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1388 				| ATMEL_US_CTSIC)) {
1389 		status_change = status ^ atmel_port->irq_status_prev;
1390 		atmel_port->irq_status_prev = status;
1391 
1392 		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1393 					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1394 			/* TODO: All reads to CSR will clear these interrupts! */
1395 			if (status_change & ATMEL_US_RI)
1396 				port->icount.rng++;
1397 			if (status_change & ATMEL_US_DSR)
1398 				port->icount.dsr++;
1399 			if (status_change & ATMEL_US_DCD)
1400 				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1401 			if (status_change & ATMEL_US_CTS)
1402 				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1403 
1404 			wake_up_interruptible(&port->state->port.delta_msr_wait);
1405 		}
1406 	}
1407 
1408 	if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION))
1409 		dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending);
1410 }
1411 
1412 /*
1413  * Interrupt handler
1414  */
1415 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1416 {
1417 	struct uart_port *port = dev_id;
1418 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1419 	unsigned int status, pending, mask, pass_counter = 0;
1420 
1421 	spin_lock(&atmel_port->lock_suspended);
1422 
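	/*
	 * Service pending interrupts, bounded by ATMEL_ISR_PASS_LIMIT passes
	 * so a stuck status bit cannot keep us in the handler forever.
	 */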
1423 	do {
1424 		status = atmel_uart_readl(port, ATMEL_US_CSR);
1425 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1426 		pending = status & mask;
1427 		if (!pending)
1428 			break;
1429 
1430 		if (atmel_port->suspended) {
1431 			atmel_port->pending |= pending;
1432 			atmel_port->pending_status = status;
1433 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1434 			pm_system_wakeup();
1435 			break;
1436 		}
1437 
1438 		atmel_handle_receive(port, pending);
1439 		atmel_handle_status(port, pending, status);
1440 		atmel_handle_transmit(port, pending);
1441 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1442 
1443 	spin_unlock(&atmel_port->lock_suspended);
1444 
1445 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1446 }
1447 
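/* Undo the DMA mapping set up by atmel_prepare_tx_pdc() */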
1448 static void atmel_release_tx_pdc(struct uart_port *port)
1449 {
1450 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1451 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1452 
1453 	dma_unmap_single(port->dev,
1454 			 pdc->dma_addr,
1455 			 pdc->dma_size,
1456 			 DMA_TO_DEVICE);
1457 }
1458 
1459 /*
1460  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1461  */
1462 static void atmel_tx_pdc(struct uart_port *port)
1463 {
1464 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1465 	struct circ_buf *xmit = &port->state->xmit;
1466 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1467 	int count;
1468 
1469 	/* nothing left to transmit? */
1470 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1471 		return;
1472 
1473 	xmit->tail += pdc->ofs;
1474 	xmit->tail &= UART_XMIT_SIZE - 1;
1475 
1476 	port->icount.tx += pdc->ofs;
1477 	pdc->ofs = 0;
1478 
1479 	/* more to transmit - setup next transfer */
1480 
1481 	/* disable PDC transmit */
1482 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1483 
1484 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1485 		dma_sync_single_for_device(port->dev,
1486 					   pdc->dma_addr,
1487 					   pdc->dma_size,
1488 					   DMA_TO_DEVICE);
1489 
1490 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1491 		pdc->ofs = count;
1492 
1493 		atmel_uart_writel(port, ATMEL_PDC_TPR,
1494 				  pdc->dma_addr + xmit->tail);
1495 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1496 		/* re-enable PDC transmit */
1497 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1498 		/* Enable interrupts */
1499 		atmel_uart_writel(port, ATMEL_US_IER,
1500 				  atmel_port->tx_done_mask);
1501 	} else {
1502 		if (atmel_uart_is_half_duplex(port)) {
1503 			/* DMA done, stop TX, start RX for RS485 */
1504 			atmel_start_rx(port);
1505 		}
1506 	}
1507 
1508 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1509 		uart_write_wakeup(port);
1510 }
1511 
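/*
 * Map the generic UART xmit circular buffer for DMA so the PDC can fetch
 * TX data directly from it.
 */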
1512 static int atmel_prepare_tx_pdc(struct uart_port *port)
1513 {
1514 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1515 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1516 	struct circ_buf *xmit = &port->state->xmit;
1517 
1518 	pdc->buf = xmit->buf;
1519 	pdc->dma_addr = dma_map_single(port->dev,
1520 					pdc->buf,
1521 					UART_XMIT_SIZE,
1522 					DMA_TO_DEVICE);
1523 	pdc->dma_size = UART_XMIT_SIZE;
1524 	pdc->ofs = 0;
1525 
1526 	return 0;
1527 }
1528 
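/*
 * Drain characters buffered by atmel_rx_chars() from the software ring and
 * push them to the tty layer, translating error status into TTY flags.
 */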
1529 static void atmel_rx_from_ring(struct uart_port *port)
1530 {
1531 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1532 	struct circ_buf *ring = &atmel_port->rx_ring;
1533 	unsigned int flg;
1534 	unsigned int status;
1535 
1536 	while (ring->head != ring->tail) {
1537 		struct atmel_uart_char c;
1538 
1539 		/* Make sure c is loaded after head. */
1540 		smp_rmb();
1541 
1542 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1543 
1544 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1545 
1546 		port->icount.rx++;
1547 		status = c.status;
1548 		flg = TTY_NORMAL;
1549 
1550 		/*
1551 		 * note that the error handling code is
1552 		 * out of the main execution path
1553 		 */
1554 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1555 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1556 			if (status & ATMEL_US_RXBRK) {
1557 				/* ignore side-effect */
1558 				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1559 
1560 				port->icount.brk++;
1561 				if (uart_handle_break(port))
1562 					continue;
1563 			}
1564 			if (status & ATMEL_US_PARE)
1565 				port->icount.parity++;
1566 			if (status & ATMEL_US_FRAME)
1567 				port->icount.frame++;
1568 			if (status & ATMEL_US_OVRE)
1569 				port->icount.overrun++;
1570 
1571 			status &= port->read_status_mask;
1572 
1573 			if (status & ATMEL_US_RXBRK)
1574 				flg = TTY_BREAK;
1575 			else if (status & ATMEL_US_PARE)
1576 				flg = TTY_PARITY;
1577 			else if (status & ATMEL_US_FRAME)
1578 				flg = TTY_FRAME;
1579 		}
1580 
1581 
1582 		if (uart_handle_sysrq_char(port, c.ch))
1583 			continue;
1584 
1585 		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1586 	}
1587 
1588 	tty_flip_buffer_push(&port->state->port);
1589 }
1590 
1591 static void atmel_release_rx_pdc(struct uart_port *port)
1592 {
1593 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1594 	int i;
1595 
1596 	for (i = 0; i < 2; i++) {
1597 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1598 
1599 		dma_unmap_single(port->dev,
1600 				 pdc->dma_addr,
1601 				 pdc->dma_size,
1602 				 DMA_FROM_DEVICE);
1603 		kfree(pdc->buf);
1604 	}
1605 }
1606 
1607 static void atmel_rx_from_pdc(struct uart_port *port)
1608 {
1609 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1610 	struct tty_port *tport = &port->state->port;
1611 	struct atmel_dma_buffer *pdc;
1612 	int rx_idx = atmel_port->pdc_rx_idx;
1613 	unsigned int head;
1614 	unsigned int tail;
1615 	unsigned int count;
1616 
1617 	do {
1618 		/* Reset the UART timeout early so that we don't miss one */
1619 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1620 
1621 		pdc = &atmel_port->pdc_rx[rx_idx];
1622 		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1623 		tail = pdc->ofs;
1624 
1625 		/* If the PDC has switched buffers, RPR won't contain
1626 		 * any address within the current buffer. Since head
1627 		 * is unsigned, we just need a one-way comparison to
1628 		 * find out.
1629 		 *
1630 		 * In this case, we just need to consume the entire
1631 		 * buffer and resubmit it for DMA. This will clear the
1632 		 * ENDRX bit as well, so that we can safely re-enable
1633 		 * all interrupts below.
1634 		 */
1635 		head = min(head, pdc->dma_size);
1636 
1637 		if (likely(head != tail)) {
1638 			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1639 					pdc->dma_size, DMA_FROM_DEVICE);
1640 
1641 			/*
1642 			 * head will only wrap around when we recycle
1643 			 * the DMA buffer, and when that happens, we
1644 			 * explicitly set tail to 0. So head will
1645 			 * always be greater than tail.
1646 			 */
1647 			count = head - tail;
1648 
1649 			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1650 						count);
1651 
1652 			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1653 					pdc->dma_size, DMA_FROM_DEVICE);
1654 
1655 			port->icount.rx += count;
1656 			pdc->ofs = head;
1657 		}
1658 
1659 		/*
1660 		 * If the current buffer is full, we need to check if
1661 		 * the next one contains any additional data.
1662 		 */
1663 		if (head >= pdc->dma_size) {
1664 			pdc->ofs = 0;
1665 			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1666 			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1667 
1668 			rx_idx = !rx_idx;
1669 			atmel_port->pdc_rx_idx = rx_idx;
1670 		}
1671 	} while (head >= pdc->dma_size);
1672 
1673 	tty_flip_buffer_push(tport);
1674 
1675 	atmel_uart_writel(port, ATMEL_US_IER,
1676 			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1677 }
1678 
1679 static int atmel_prepare_rx_pdc(struct uart_port *port)
1680 {
1681 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1682 	int i;
1683 
1684 	for (i = 0; i < 2; i++) {
1685 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1686 
1687 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1688 		if (pdc->buf == NULL) {
1689 			if (i != 0) {
1690 				dma_unmap_single(port->dev,
1691 					atmel_port->pdc_rx[0].dma_addr,
1692 					PDC_BUFFER_SIZE,
1693 					DMA_FROM_DEVICE);
1694 				kfree(atmel_port->pdc_rx[0].buf);
1695 			}
1696 			atmel_port->use_pdc_rx = false;
1697 			return -ENOMEM;
1698 		}
1699 		pdc->dma_addr = dma_map_single(port->dev,
1700 						pdc->buf,
1701 						PDC_BUFFER_SIZE,
1702 						DMA_FROM_DEVICE);
1703 		pdc->dma_size = PDC_BUFFER_SIZE;
1704 		pdc->ofs = 0;
1705 	}
1706 
1707 	atmel_port->pdc_rx_idx = 0;
1708 
1709 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1710 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1711 
1712 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1713 			  atmel_port->pdc_rx[1].dma_addr);
1714 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1715 
1716 	return 0;
1717 }
1718 
1719 /*
1720  * tasklet handling tty stuff outside the interrupt handler.
1721  */
1722 static void atmel_tasklet_rx_func(struct tasklet_struct *t)
1723 {
1724 	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1725 							  tasklet_rx);
1726 	struct uart_port *port = &atmel_port->uart;
1727 
1728 	/* The interrupt handler does not take the lock */
1729 	spin_lock(&port->lock);
1730 	atmel_port->schedule_rx(port);
1731 	spin_unlock(&port->lock);
1732 }
1733 
1734 static void atmel_tasklet_tx_func(struct tasklet_struct *t)
1735 {
1736 	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1737 							  tasklet_tx);
1738 	struct uart_port *port = &atmel_port->uart;
1739 
1740 	/* The interrupt handler does not take the lock */
1741 	spin_lock(&port->lock);
1742 	atmel_port->schedule_tx(port);
1743 	spin_unlock(&port->lock);
1744 }
1745 
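/*
 * Parse the "atmel,use-dma-rx" and "atmel,use-dma-tx" DT properties: prefer
 * the DMA engine when a "dmas" property is present, otherwise fall back to
 * the PDC.
 */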
1746 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1747 				struct platform_device *pdev)
1748 {
1749 	struct device_node *np = pdev->dev.of_node;
1750 
1751 	/* DMA/PDC usage specification */
1752 	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1753 		if (of_property_read_bool(np, "dmas")) {
1754 			atmel_port->use_dma_rx  = true;
1755 			atmel_port->use_pdc_rx  = false;
1756 		} else {
1757 			atmel_port->use_dma_rx  = false;
1758 			atmel_port->use_pdc_rx  = true;
1759 		}
1760 	} else {
1761 		atmel_port->use_dma_rx  = false;
1762 		atmel_port->use_pdc_rx  = false;
1763 	}
1764 
1765 	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1766 		if (of_property_read_bool(np, "dmas")) {
1767 			atmel_port->use_dma_tx  = true;
1768 			atmel_port->use_pdc_tx  = false;
1769 		} else {
1770 			atmel_port->use_dma_tx  = false;
1771 			atmel_port->use_pdc_tx  = true;
1772 		}
1773 	} else {
1774 		atmel_port->use_dma_tx  = false;
1775 		atmel_port->use_pdc_tx  = false;
1776 	}
1777 }
1778 
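/*
 * Select the RX and TX backends (DMA engine, PDC or PIO) according to the
 * configuration chosen in atmel_init_property().
 */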
1779 static void atmel_set_ops(struct uart_port *port)
1780 {
1781 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1782 
1783 	if (atmel_use_dma_rx(port)) {
1784 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1785 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1786 		atmel_port->release_rx = &atmel_release_rx_dma;
1787 	} else if (atmel_use_pdc_rx(port)) {
1788 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1789 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1790 		atmel_port->release_rx = &atmel_release_rx_pdc;
1791 	} else {
1792 		atmel_port->prepare_rx = NULL;
1793 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1794 		atmel_port->release_rx = NULL;
1795 	}
1796 
1797 	if (atmel_use_dma_tx(port)) {
1798 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1799 		atmel_port->schedule_tx = &atmel_tx_dma;
1800 		atmel_port->release_tx = &atmel_release_tx_dma;
1801 	} else if (atmel_use_pdc_tx(port)) {
1802 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1803 		atmel_port->schedule_tx = &atmel_tx_pdc;
1804 		atmel_port->release_tx = &atmel_release_tx_pdc;
1805 	} else {
1806 		atmel_port->prepare_tx = NULL;
1807 		atmel_port->schedule_tx = &atmel_tx_chars;
1808 		atmel_port->release_tx = NULL;
1809 	}
1810 }
1811 
1812 /*
1813  * Get the IP name: usart or uart
1814  */
1815 static void atmel_get_ip_name(struct uart_port *port)
1816 {
1817 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1818 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1819 	u32 version;
1820 	u32 usart, dbgu_uart, new_uart;
1821 	/* ASCII decoding for IP version */
1822 	usart = 0x55534152;	/* USAR(T) */
1823 	dbgu_uart = 0x44424755;	/* DBGU */
1824 	new_uart = 0x55415254;	/* UART */
1825 
1826 	/*
1827 	 * Only USART devices from at91sam9260 SOC implement fractional
1828 	 * baudrate. It is available for all asynchronous modes, with the
1829 	 * following restriction: the sampling clock's duty cycle is not
1830 	 * constant.
1831 	 */
1832 	atmel_port->has_frac_baudrate = false;
1833 	atmel_port->has_hw_timer = false;
1834 
1835 	if (name == new_uart) {
1836 		dev_dbg(port->dev, "Uart with hw timer");
1837 		atmel_port->has_hw_timer = true;
1838 		atmel_port->rtor = ATMEL_UA_RTOR;
1839 	} else if (name == usart) {
1840 		dev_dbg(port->dev, "Usart\n");
1841 		atmel_port->has_frac_baudrate = true;
1842 		atmel_port->has_hw_timer = true;
1843 		atmel_port->rtor = ATMEL_US_RTOR;
1844 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1845 		switch (version) {
1846 		case 0x814:	/* sama5d2 */
1847 			fallthrough;
1848 		case 0x701:	/* sama5d4 */
1849 			atmel_port->fidi_min = 3;
1850 			atmel_port->fidi_max = 65535;
1851 			break;
1852 		case 0x502:	/* sam9x5, sama5d3 */
1853 			atmel_port->fidi_min = 3;
1854 			atmel_port->fidi_max = 2047;
1855 			break;
1856 		default:
1857 			atmel_port->fidi_min = 1;
1858 			atmel_port->fidi_max = 2047;
1859 		}
1860 	} else if (name == dbgu_uart) {
1861 		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1862 	} else {
1863 		/* fallback for older SoCs: use version field */
1864 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1865 		switch (version) {
1866 		case 0x302:
1867 		case 0x10213:
1868 		case 0x10302:
1869 			dev_dbg(port->dev, "This version is usart\n");
1870 			atmel_port->has_frac_baudrate = true;
1871 			atmel_port->has_hw_timer = true;
1872 			atmel_port->rtor = ATMEL_US_RTOR;
1873 			break;
1874 		case 0x203:
1875 		case 0x10202:
1876 			dev_dbg(port->dev, "This version is uart\n");
1877 			break;
1878 		default:
1879 			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1880 		}
1881 	}
1882 }
1883 
1884 /*
1885  * Perform initialization and enable port for reception
1886  */
1887 static int atmel_startup(struct uart_port *port)
1888 {
1889 	struct platform_device *pdev = to_platform_device(port->dev);
1890 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1891 	int retval;
1892 
1893 	/*
1894 	 * Ensure that no interrupts are enabled; otherwise, when
1895 	 * request_irq() is called, we could get stuck trying to
1896 	 * handle an unexpected interrupt.
1897 	 */
1898 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1899 	atmel_port->ms_irq_enabled = false;
1900 
1901 	/*
1902 	 * Allocate the IRQ
1903 	 */
1904 	retval = request_irq(port->irq, atmel_interrupt,
1905 			     IRQF_SHARED | IRQF_COND_SUSPEND,
1906 			     dev_name(&pdev->dev), port);
1907 	if (retval) {
1908 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1909 		return retval;
1910 	}
1911 
1912 	atomic_set(&atmel_port->tasklet_shutdown, 0);
1913 	tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func);
1914 	tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func);
1915 
1916 	/*
1917 	 * Initialize DMA (if necessary)
1918 	 */
1919 	atmel_init_property(atmel_port, pdev);
1920 	atmel_set_ops(port);
1921 
1922 	if (atmel_port->prepare_rx) {
1923 		retval = atmel_port->prepare_rx(port);
1924 		if (retval < 0)
1925 			atmel_set_ops(port);
1926 	}
1927 
1928 	if (atmel_port->prepare_tx) {
1929 		retval = atmel_port->prepare_tx(port);
1930 		if (retval < 0)
1931 			atmel_set_ops(port);
1932 	}
1933 
1934 	/*
1935 	 * Enable FIFO when available
1936 	 */
1937 	if (atmel_port->fifo_size) {
1938 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1939 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1940 		unsigned int fmr;
1941 
1942 		atmel_uart_writel(port, ATMEL_US_CR,
1943 				  ATMEL_US_FIFOEN |
1944 				  ATMEL_US_RXFCLR |
1945 				  ATMEL_US_TXFLCLR);
1946 
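		/*
		 * When DMA drives TX, raise TXRDY only once the FIFO can take
		 * at least four data (ATMEL_US_FOUR_DATA), presumably so each
		 * trigger moves a small burst instead of a single character.
		 */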
1947 		if (atmel_use_dma_tx(port))
1948 			txrdym = ATMEL_US_FOUR_DATA;
1949 
1950 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1951 		if (atmel_port->rts_high &&
1952 		    atmel_port->rts_low)
1953 			fmr |=	ATMEL_US_FRTSC |
1954 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1955 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1956 
1957 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1958 	}
1959 
1960 	/* Save current CSR for comparison in atmel_handle_status() */
1961 	atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR);
1962 
1963 	/*
1964 	 * Finally, enable the serial port
1965 	 */
1966 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1967 	/* enable xmit & rcvr */
1968 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1969 	atmel_port->tx_stopped = false;
1970 
1971 	timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1972 
1973 	if (atmel_use_pdc_rx(port)) {
1974 		/* set UART timeout */
1975 		if (!atmel_port->has_hw_timer) {
1976 			mod_timer(&atmel_port->uart_timer,
1977 					jiffies + uart_poll_timeout(port));
1978 		/* set USART timeout */
1979 		} else {
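			/*
			 * The receiver time-out counts in bit periods, so
			 * PDC_RX_TIMEOUT (3 * 10) corresponds to roughly three
			 * idle 10-bit character times before the timeout fires.
			 */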
1980 			atmel_uart_writel(port, atmel_port->rtor,
1981 					  PDC_RX_TIMEOUT);
1982 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1983 
1984 			atmel_uart_writel(port, ATMEL_US_IER,
1985 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1986 		}
1987 		/* enable PDC controller */
1988 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1989 	} else if (atmel_use_dma_rx(port)) {
1990 		/* set UART timeout */
1991 		if (!atmel_port->has_hw_timer) {
1992 			mod_timer(&atmel_port->uart_timer,
1993 					jiffies + uart_poll_timeout(port));
1994 		/* set USART timeout */
1995 		} else {
1996 			atmel_uart_writel(port, atmel_port->rtor,
1997 					  PDC_RX_TIMEOUT);
1998 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1999 
2000 			atmel_uart_writel(port, ATMEL_US_IER,
2001 					  ATMEL_US_TIMEOUT);
2002 		}
2003 	} else {
2004 		/* enable receive only */
2005 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
2006 	}
2007 
2008 	return 0;
2009 }
2010 
2011 /*
2012  * Flush any TX data submitted for DMA. Called when the TX circular
2013  * buffer is reset.
2014  */
2015 static void atmel_flush_buffer(struct uart_port *port)
2016 {
2017 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2018 
2019 	if (atmel_use_pdc_tx(port)) {
2020 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
2021 		atmel_port->pdc_tx.ofs = 0;
2022 	}
2023 	/*
2024 	 * in uart_flush_buffer(), the xmit circular buffer has just
2025 	 * been cleared, so we have to reset tx_len accordingly.
2026 	 */
2027 	atmel_port->tx_len = 0;
2028 }
2029 
2030 /*
2031  * Disable the port
2032  */
2033 static void atmel_shutdown(struct uart_port *port)
2034 {
2035 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2036 
2037 	/* Disable modem control line interrupts */
2038 	atmel_disable_ms(port);
2039 
2040 	/* Disable interrupts at device level */
2041 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2042 
2043 	/* Prevent spurious interrupts from scheduling the tasklet */
2044 	atomic_inc(&atmel_port->tasklet_shutdown);
2045 
2046 	/*
2047 	 * Prevent any tasklets being scheduled during
2048 	 * cleanup
2049 	 */
2050 	del_timer_sync(&atmel_port->uart_timer);
2051 
2052 	/* Make sure that no interrupt is in flight */
2053 	synchronize_irq(port->irq);
2054 
2055 	/*
2056 	 * Clear out any scheduled tasklets before
2057 	 * we destroy the buffers
2058 	 */
2059 	tasklet_kill(&atmel_port->tasklet_rx);
2060 	tasklet_kill(&atmel_port->tasklet_tx);
2061 
2062 	/*
2063 	 * Ensure everything is stopped and
2064 	 * disable port and break condition.
2065 	 */
2066 	atmel_stop_rx(port);
2067 	atmel_stop_tx(port);
2068 
2069 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2070 
2071 	/*
2072 	 * Shut-down the DMA.
2073 	 */
2074 	if (atmel_port->release_rx)
2075 		atmel_port->release_rx(port);
2076 	if (atmel_port->release_tx)
2077 		atmel_port->release_tx(port);
2078 
2079 	/*
2080 	 * Reset ring buffer pointers
2081 	 */
2082 	atmel_port->rx_ring.head = 0;
2083 	atmel_port->rx_ring.tail = 0;
2084 
2085 	/*
2086 	 * Free the interrupts
2087 	 */
2088 	free_irq(port->irq, port);
2089 
2090 	atmel_flush_buffer(port);
2091 }
2092 
2093 /*
2094  * Power / Clock management.
2095  */
2096 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2097 			    unsigned int oldstate)
2098 {
2099 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2100 
2101 	switch (state) {
2102 	case UART_PM_STATE_ON:
2103 		/*
2104 		 * Enable the peripheral clock for this serial port.
2105 		 * This is called on uart_open() or a resume event.
2106 		 */
2107 		clk_prepare_enable(atmel_port->clk);
2108 
2109 		/* re-enable interrupts if we disabled some on suspend */
2110 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2111 		break;
2112 	case UART_PM_STATE_OFF:
2113 		/* Back up the interrupt mask and disable all interrupts */
2114 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2115 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
2116 
2117 		/*
2118 		 * Disable the peripheral clock for this serial port.
2119 		 * This is called on uart_close() or a suspend event.
2120 		 */
2121 		clk_disable_unprepare(atmel_port->clk);
2122 		break;
2123 	default:
2124 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2125 	}
2126 }
2127 
2128 /*
2129  * Change the port parameters
2130  */
2131 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2132 			      struct ktermios *old)
2133 {
2134 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2135 	unsigned long flags;
2136 	unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
2137 
2138 	/* save the current mode register */
2139 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2140 
2141 	/* reset the mode, clock divisor, parity, stop bits and data size */
2142 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2143 		  ATMEL_US_PAR | ATMEL_US_USMODE);
2144 
2145 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2146 
2147 	/* byte size */
2148 	switch (termios->c_cflag & CSIZE) {
2149 	case CS5:
2150 		mode |= ATMEL_US_CHRL_5;
2151 		break;
2152 	case CS6:
2153 		mode |= ATMEL_US_CHRL_6;
2154 		break;
2155 	case CS7:
2156 		mode |= ATMEL_US_CHRL_7;
2157 		break;
2158 	default:
2159 		mode |= ATMEL_US_CHRL_8;
2160 		break;
2161 	}
2162 
2163 	/* stop bits */
2164 	if (termios->c_cflag & CSTOPB)
2165 		mode |= ATMEL_US_NBSTOP_2;
2166 
2167 	/* parity */
2168 	if (termios->c_cflag & PARENB) {
2169 		/* Mark or Space parity */
2170 		if (termios->c_cflag & CMSPAR) {
2171 			if (termios->c_cflag & PARODD)
2172 				mode |= ATMEL_US_PAR_MARK;
2173 			else
2174 				mode |= ATMEL_US_PAR_SPACE;
2175 		} else if (termios->c_cflag & PARODD)
2176 			mode |= ATMEL_US_PAR_ODD;
2177 		else
2178 			mode |= ATMEL_US_PAR_EVEN;
2179 	} else
2180 		mode |= ATMEL_US_PAR_NONE;
2181 
2182 	spin_lock_irqsave(&port->lock, flags);
2183 
2184 	port->read_status_mask = ATMEL_US_OVRE;
2185 	if (termios->c_iflag & INPCK)
2186 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2187 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2188 		port->read_status_mask |= ATMEL_US_RXBRK;
2189 
2190 	if (atmel_use_pdc_rx(port))
2191 		/* need to enable error interrupts */
2192 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2193 
2194 	/*
2195 	 * Characters to ignore
2196 	 */
2197 	port->ignore_status_mask = 0;
2198 	if (termios->c_iflag & IGNPAR)
2199 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2200 	if (termios->c_iflag & IGNBRK) {
2201 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2202 		/*
2203 		 * If we're ignoring parity and break indicators,
2204 		 * ignore overruns too (for real raw support).
2205 		 */
2206 		if (termios->c_iflag & IGNPAR)
2207 			port->ignore_status_mask |= ATMEL_US_OVRE;
2208 	}
2209 	/* TODO: Ignore all characters if CREAD is not set. */
2210 
2211 	/* update the per-port timeout */
2212 	uart_update_timeout(port, termios->c_cflag, baud);
2213 
2214 	/*
2215 	 * save/disable interrupts. The tty layer will ensure that the
2216 	 * transmitter is empty if requested by the caller, so there's
2217 	 * no need to wait for it here.
2218 	 */
2219 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2220 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2221 
2222 	/* disable receiver and transmitter */
2223 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2224 	atmel_port->tx_stopped = true;
2225 
2226 	/* mode */
2227 	if (port->rs485.flags & SER_RS485_ENABLED) {
2228 		atmel_uart_writel(port, ATMEL_US_TTGR,
2229 				  port->rs485.delay_rts_after_send);
2230 		mode |= ATMEL_US_USMODE_RS485;
2231 	} else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
2232 		atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
2233 		/* select the mck clock, and enable clock output */
2234 		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
2235 		/* set max iterations */
2236 		mode |= ATMEL_US_MAX_ITER(3);
2237 		if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
2238 				== SER_ISO7816_T(0))
2239 			mode |= ATMEL_US_USMODE_ISO7816_T0;
2240 		else
2241 			mode |= ATMEL_US_USMODE_ISO7816_T1;
2242 	} else if (termios->c_cflag & CRTSCTS) {
2243 		/* RS232 with hardware handshake (RTS/CTS) */
2244 		if (atmel_use_fifo(port) &&
2245 		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2246 			/*
2247 			 * With ATMEL_US_USMODE_HWHS set, the controller will
2248 			 * be able to drive the RTS pin high/low when the RX
2249 			 * FIFO is above RXFTHRES/below RXFTHRES2.
2250 			 * It will also disable the transmitter when the CTS
2251 			 * pin is high.
2252 			 * This mode is not activated if the CTS pin is a GPIO
2253 			 * because, in this case, the transmitter is always
2254 			 * disabled (there must be an internal pull-up
2255 			 * responsible for this behaviour).
2256 			 * If the RTS pin is a GPIO, the controller won't be
2257 			 * able to drive it according to the FIFO thresholds,
2258 			 * but the driver will handle the RTS line instead.
2259 			 */
2260 			mode |= ATMEL_US_USMODE_HWHS;
2261 		} else {
2262 			/*
2263 			 * For platforms without FIFO, the flow control is
2264 			 * handled by the driver.
2265 			 */
2266 			mode |= ATMEL_US_USMODE_NORMAL;
2267 		}
2268 	} else {
2269 		/* RS232 without hardware handshake */
2270 		mode |= ATMEL_US_USMODE_NORMAL;
2271 	}
2272 
2273 	/*
2274 	 * Set the baud rate:
2275 	 * The fractional baudrate allows the output frequency to be set up
2276 	 * more accurately. This feature is enabled only when using normal mode.
2277 	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2278 	 * Currently, OVER is always set to 0, so we get
2279 	 * baudrate = selected clock / (16 * (CD + FP / 8))
2280 	 * and therefore
2281 	 * 8 * CD + FP = selected clock / (2 * baudrate)
2282 	 */
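	/*
	 * Worked example (illustrative figures, assuming the usual 3-bit FP
	 * field): with a 132 MHz selected clock and a requested 115200 baud,
	 * div rounds to 573, so CD = 573 >> 3 = 71 and FP = 573 & 7 = 5,
	 * giving an actual rate of 132 MHz / (16 * (71 + 5/8)) ~= 115183 baud.
	 */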
2283 	if (atmel_port->has_frac_baudrate) {
2284 		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2285 		cd = div >> 3;
2286 		fp = div & ATMEL_US_FP_MASK;
2287 	} else {
2288 		cd = uart_get_divisor(port, baud);
2289 	}
2290 
2291 	if (cd > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
2292 		cd /= 8;
2293 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2294 	}
2295 	quot = cd | fp << ATMEL_US_FP_OFFSET;
2296 
2297 	if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
2298 		atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2299 
2300 	/* set the mode, clock divisor, parity, stop bits and data size */
2301 	atmel_uart_writel(port, ATMEL_US_MR, mode);
2302 
2303 	/*
2304 	 * when switching the mode, set the RTS line state according to the
2305 	 * new mode, otherwise keep the former state
2306 	 */
2307 	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2308 		unsigned int rts_state;
2309 
2310 		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2311 			/* let the hardware control the RTS line */
2312 			rts_state = ATMEL_US_RTSDIS;
2313 		} else {
2314 			/* force RTS line to low level */
2315 			rts_state = ATMEL_US_RTSEN;
2316 		}
2317 
2318 		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2319 	}
2320 
2321 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2322 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2323 	atmel_port->tx_stopped = false;
2324 
2325 	/* restore interrupts */
2326 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2327 
2328 	/* CTS flow-control and modem-status interrupts */
2329 	if (UART_ENABLE_MS(port, termios->c_cflag))
2330 		atmel_enable_ms(port);
2331 	else
2332 		atmel_disable_ms(port);
2333 
2334 	spin_unlock_irqrestore(&port->lock, flags);
2335 }
2336 
2337 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2338 {
2339 	if (termios->c_line == N_PPS) {
2340 		port->flags |= UPF_HARDPPS_CD;
2341 		spin_lock_irq(&port->lock);
2342 		atmel_enable_ms(port);
2343 		spin_unlock_irq(&port->lock);
2344 	} else {
2345 		port->flags &= ~UPF_HARDPPS_CD;
2346 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2347 			spin_lock_irq(&port->lock);
2348 			atmel_disable_ms(port);
2349 			spin_unlock_irq(&port->lock);
2350 		}
2351 	}
2352 }
2353 
2354 /*
2355  * Return string describing the specified port
2356  */
2357 static const char *atmel_type(struct uart_port *port)
2358 {
2359 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2360 }
2361 
2362 /*
2363  * Release the memory region(s) being used by 'port'.
2364  */
2365 static void atmel_release_port(struct uart_port *port)
2366 {
2367 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2368 	int size = resource_size(mpdev->resource);
2369 
2370 	release_mem_region(port->mapbase, size);
2371 
2372 	if (port->flags & UPF_IOREMAP) {
2373 		iounmap(port->membase);
2374 		port->membase = NULL;
2375 	}
2376 }
2377 
2378 /*
2379  * Request the memory region(s) being used by 'port'.
2380  */
2381 static int atmel_request_port(struct uart_port *port)
2382 {
2383 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2384 	int size = resource_size(mpdev->resource);
2385 
2386 	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2387 		return -EBUSY;
2388 
2389 	if (port->flags & UPF_IOREMAP) {
2390 		port->membase = ioremap(port->mapbase, size);
2391 		if (port->membase == NULL) {
2392 			release_mem_region(port->mapbase, size);
2393 			return -ENOMEM;
2394 		}
2395 	}
2396 
2397 	return 0;
2398 }
2399 
2400 /*
2401  * Configure/autoconfigure the port.
2402  */
2403 static void atmel_config_port(struct uart_port *port, int flags)
2404 {
2405 	if (flags & UART_CONFIG_TYPE) {
2406 		port->type = PORT_ATMEL;
2407 		atmel_request_port(port);
2408 	}
2409 }
2410 
2411 /*
2412  * Verify the new serial_struct (for TIOCSSERIAL).
2413  */
2414 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2415 {
2416 	int ret = 0;
2417 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2418 		ret = -EINVAL;
2419 	if (port->irq != ser->irq)
2420 		ret = -EINVAL;
2421 	if (ser->io_type != SERIAL_IO_MEM)
2422 		ret = -EINVAL;
2423 	if (port->uartclk / 16 != ser->baud_base)
2424 		ret = -EINVAL;
2425 	if (port->mapbase != (unsigned long)ser->iomem_base)
2426 		ret = -EINVAL;
2427 	if (port->iobase != ser->port)
2428 		ret = -EINVAL;
2429 	if (ser->hub6 != 0)
2430 		ret = -EINVAL;
2431 	return ret;
2432 }
2433 
2434 #ifdef CONFIG_CONSOLE_POLL
2435 static int atmel_poll_get_char(struct uart_port *port)
2436 {
2437 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2438 		cpu_relax();
2439 
2440 	return atmel_uart_read_char(port);
2441 }
2442 
2443 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2444 {
2445 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2446 		cpu_relax();
2447 
2448 	atmel_uart_write_char(port, ch);
2449 }
2450 #endif
2451 
2452 static const struct uart_ops atmel_pops = {
2453 	.tx_empty	= atmel_tx_empty,
2454 	.set_mctrl	= atmel_set_mctrl,
2455 	.get_mctrl	= atmel_get_mctrl,
2456 	.stop_tx	= atmel_stop_tx,
2457 	.start_tx	= atmel_start_tx,
2458 	.stop_rx	= atmel_stop_rx,
2459 	.enable_ms	= atmel_enable_ms,
2460 	.break_ctl	= atmel_break_ctl,
2461 	.startup	= atmel_startup,
2462 	.shutdown	= atmel_shutdown,
2463 	.flush_buffer	= atmel_flush_buffer,
2464 	.set_termios	= atmel_set_termios,
2465 	.set_ldisc	= atmel_set_ldisc,
2466 	.type		= atmel_type,
2467 	.release_port	= atmel_release_port,
2468 	.request_port	= atmel_request_port,
2469 	.config_port	= atmel_config_port,
2470 	.verify_port	= atmel_verify_port,
2471 	.pm		= atmel_serial_pm,
2472 #ifdef CONFIG_CONSOLE_POLL
2473 	.poll_get_char	= atmel_poll_get_char,
2474 	.poll_put_char	= atmel_poll_put_char,
2475 #endif
2476 };
2477 
2478 /*
2479  * Configure the port from the platform device resource info.
2480  */
2481 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2482 				      struct platform_device *pdev)
2483 {
2484 	int ret;
2485 	struct uart_port *port = &atmel_port->uart;
2486 	struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
2487 
2488 	atmel_init_property(atmel_port, pdev);
2489 	atmel_set_ops(port);
2490 
2491 	port->iotype		= UPIO_MEM;
2492 	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2493 	port->ops		= &atmel_pops;
2494 	port->fifosize		= 1;
2495 	port->dev		= &pdev->dev;
2496 	port->mapbase		= mpdev->resource[0].start;
2497 	port->irq		= platform_get_irq(mpdev, 0);
2498 	port->rs485_config	= atmel_config_rs485;
2499 	port->iso7816_config	= atmel_config_iso7816;
2500 	port->membase		= NULL;
2501 
2502 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2503 
2504 	ret = uart_get_rs485_mode(port);
2505 	if (ret)
2506 		return ret;
2507 
2508 	/* for console, the clock could already be configured */
2509 	if (!atmel_port->clk) {
2510 		atmel_port->clk = clk_get(&mpdev->dev, "usart");
2511 		if (IS_ERR(atmel_port->clk)) {
2512 			ret = PTR_ERR(atmel_port->clk);
2513 			atmel_port->clk = NULL;
2514 			return ret;
2515 		}
2516 		ret = clk_prepare_enable(atmel_port->clk);
2517 		if (ret) {
2518 			clk_put(atmel_port->clk);
2519 			atmel_port->clk = NULL;
2520 			return ret;
2521 		}
2522 		port->uartclk = clk_get_rate(atmel_port->clk);
2523 		clk_disable_unprepare(atmel_port->clk);
2524 		/* only enable clock when USART is in use */
2525 	}
2526 
2527 	/*
2528 	 * Use TXEMPTY as the TX interrupt in RS485 or ISO7816 mode; otherwise
2529 	 * use TXRDY, or ENDTX|TXBUFE when the PDC handles TX.
2530 	 */
2531 	if (atmel_uart_is_half_duplex(port))
2532 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2533 	else if (atmel_use_pdc_tx(port)) {
2534 		port->fifosize = PDC_BUFFER_SIZE;
2535 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2536 	} else {
2537 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2544 static void atmel_console_putchar(struct uart_port *port, unsigned char ch)
2545 {
2546 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2547 		cpu_relax();
2548 	atmel_uart_write_char(port, ch);
2549 }
2550 
2551 /*
2552  * Interrupts are disabled on entry.
2553  */
2554 static void atmel_console_write(struct console *co, const char *s, u_int count)
2555 {
2556 	struct uart_port *port = &atmel_ports[co->index].uart;
2557 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2558 	unsigned int status, imr;
2559 	unsigned int pdc_tx;
2560 
2561 	/*
2562 	 * First, save IMR and then disable interrupts
2563 	 */
2564 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2565 	atmel_uart_writel(port, ATMEL_US_IDR,
2566 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2567 
2568 	/* Store PDC transmit status and disable it */
2569 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2570 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2571 
2572 	/* Make sure that tx path is actually able to send characters */
2573 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2574 	atmel_port->tx_stopped = false;
2575 
2576 	uart_console_write(port, s, count, atmel_console_putchar);
2577 
2578 	/*
2579 	 * Finally, wait for transmitter to become empty
2580 	 * and restore IMR
2581 	 */
2582 	do {
2583 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2584 	} while (!(status & ATMEL_US_TXRDY));
2585 
2586 	/* Restore PDC transmit status */
2587 	if (pdc_tx)
2588 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2589 
2590 	/* set interrupts back the way they were */
2591 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2592 }
2593 
2594 /*
2595  * If the port was already initialised (e.g. by a boot loader),
2596  * try to determine the current setup.
2597  */
2598 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2599 					     int *parity, int *bits)
2600 {
2601 	unsigned int mr, quot;
2602 
2603 	/*
2604 	 * If the baud rate generator isn't running, the port wasn't
2605 	 * initialized by the boot loader.
2606 	 */
2607 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2608 	if (!quot)
2609 		return;
2610 
2611 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2612 	if (mr == ATMEL_US_CHRL_8)
2613 		*bits = 8;
2614 	else
2615 		*bits = 7;
2616 
2617 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2618 	if (mr == ATMEL_US_PAR_EVEN)
2619 		*parity = 'e';
2620 	else if (mr == ATMEL_US_PAR_ODD)
2621 		*parity = 'o';
2622 
2623 	/*
2624 	 * The serial core only rounds down when matching this to a
2625 	 * supported baud rate. Make sure we don't end up slightly
2626 	 * lower than one of those, as it would make us fall through
2627 	 * to a much lower baud rate than we really want.
2628 	 */
2629 	*baud = port->uartclk / (16 * (quot - 1));
2630 }
2631 
2632 static int __init atmel_console_setup(struct console *co, char *options)
2633 {
2634 	int ret;
2635 	struct uart_port *port = &atmel_ports[co->index].uart;
2636 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2637 	int baud = 115200;
2638 	int bits = 8;
2639 	int parity = 'n';
2640 	int flow = 'n';
2641 
2642 	if (port->membase == NULL) {
2643 		/* Port not initialized yet - delay setup */
2644 		return -ENODEV;
2645 	}
2646 
2647 	ret = clk_prepare_enable(atmel_ports[co->index].clk);
2648 	if (ret)
2649 		return ret;
2650 
2651 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2652 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2653 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2654 	atmel_port->tx_stopped = false;
2655 
2656 	if (options)
2657 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2658 	else
2659 		atmel_console_get_options(port, &baud, &parity, &bits);
2660 
2661 	return uart_set_options(port, co, baud, parity, bits, flow);
2662 }
2663 
2664 static struct uart_driver atmel_uart;
2665 
2666 static struct console atmel_console = {
2667 	.name		= ATMEL_DEVICENAME,
2668 	.write		= atmel_console_write,
2669 	.device		= uart_console_device,
2670 	.setup		= atmel_console_setup,
2671 	.flags		= CON_PRINTBUFFER,
2672 	.index		= -1,
2673 	.data		= &atmel_uart,
2674 };
2675 
2676 static void atmel_serial_early_write(struct console *con, const char *s,
2677 				     unsigned int n)
2678 {
2679 	struct earlycon_device *dev = con->data;
2680 
2681 	uart_console_write(&dev->port, s, n, atmel_console_putchar);
2682 }
2683 
2684 static int __init atmel_early_console_setup(struct earlycon_device *device,
2685 					    const char *options)
2686 {
2687 	if (!device->port.membase)
2688 		return -ENODEV;
2689 
2690 	device->con->write = atmel_serial_early_write;
2691 
2692 	return 0;
2693 }
2694 
2695 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91rm9200-usart",
2696 		    atmel_early_console_setup);
2697 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91sam9260-usart",
2698 		    atmel_early_console_setup);
2699 
2700 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2701 
2702 #else
2703 #define ATMEL_CONSOLE_DEVICE	NULL
2704 #endif
2705 
2706 static struct uart_driver atmel_uart = {
2707 	.owner		= THIS_MODULE,
2708 	.driver_name	= "atmel_serial",
2709 	.dev_name	= ATMEL_DEVICENAME,
2710 	.major		= SERIAL_ATMEL_MAJOR,
2711 	.minor		= MINOR_START,
2712 	.nr		= ATMEL_MAX_UART,
2713 	.cons		= ATMEL_CONSOLE_DEVICE,
2714 };
2715 
2716 #ifdef CONFIG_PM
2717 static bool atmel_serial_clk_will_stop(void)
2718 {
2719 #ifdef CONFIG_ARCH_AT91
2720 	return at91_suspend_entering_slow_clock();
2721 #else
2722 	return false;
2723 #endif
2724 }
2725 
2726 static int atmel_serial_suspend(struct platform_device *pdev,
2727 				pm_message_t state)
2728 {
2729 	struct uart_port *port = platform_get_drvdata(pdev);
2730 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2731 
2732 	if (uart_console(port) && console_suspend_enabled) {
2733 		/* Drain the TX shifter */
2734 		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2735 			 ATMEL_US_TXEMPTY))
2736 			cpu_relax();
2737 	}
2738 
2739 	if (uart_console(port) && !console_suspend_enabled) {
2740 		/* Cache register values as we won't get a full shutdown/startup
2741 		 * cycle
2742 		 */
2743 		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2744 		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2745 		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2746 		atmel_port->cache.rtor = atmel_uart_readl(port,
2747 							  atmel_port->rtor);
2748 		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2749 		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2750 		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2751 	}
2752 
2753 	/* we cannot wake up if we're running on the slow clock */
2754 	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2755 	if (atmel_serial_clk_will_stop()) {
2756 		unsigned long flags;
2757 
2758 		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2759 		atmel_port->suspended = true;
2760 		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2761 		device_set_wakeup_enable(&pdev->dev, 0);
2762 	}
2763 
2764 	uart_suspend_port(&atmel_uart, port);
2765 
2766 	return 0;
2767 }
2768 
2769 static int atmel_serial_resume(struct platform_device *pdev)
2770 {
2771 	struct uart_port *port = platform_get_drvdata(pdev);
2772 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2773 	unsigned long flags;
2774 
2775 	if (uart_console(port) && !console_suspend_enabled) {
2776 		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2777 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2778 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2779 		atmel_uart_writel(port, atmel_port->rtor,
2780 				  atmel_port->cache.rtor);
2781 		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2782 
2783 		if (atmel_port->fifo_size) {
2784 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2785 					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2786 			atmel_uart_writel(port, ATMEL_US_FMR,
2787 					  atmel_port->cache.fmr);
2788 			atmel_uart_writel(port, ATMEL_US_FIER,
2789 					  atmel_port->cache.fimr);
2790 		}
2791 		atmel_start_rx(port);
2792 	}
2793 
2794 	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2795 	if (atmel_port->pending) {
2796 		atmel_handle_receive(port, atmel_port->pending);
2797 		atmel_handle_status(port, atmel_port->pending,
2798 				    atmel_port->pending_status);
2799 		atmel_handle_transmit(port, atmel_port->pending);
2800 		atmel_port->pending = 0;
2801 	}
2802 	atmel_port->suspended = false;
2803 	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2804 
2805 	uart_resume_port(&atmel_uart, port);
2806 	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2807 
2808 	return 0;
2809 }
2810 #else
2811 #define atmel_serial_suspend NULL
2812 #define atmel_serial_resume NULL
2813 #endif
2814 
2815 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2816 				     struct platform_device *pdev)
2817 {
2818 	atmel_port->fifo_size = 0;
2819 	atmel_port->rts_low = 0;
2820 	atmel_port->rts_high = 0;
2821 
2822 	if (of_property_read_u32(pdev->dev.of_node,
2823 				 "atmel,fifo-size",
2824 				 &atmel_port->fifo_size))
2825 		return;
2826 
2827 	if (!atmel_port->fifo_size)
2828 		return;
2829 
2830 	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2831 		atmel_port->fifo_size = 0;
2832 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2833 		return;
2834 	}
2835 
2836 	/*
2837 	 * 0 <= rts_low <= rts_high <= fifo_size
2838 	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
2839 	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
2840 	 * actually stopping sending new data. So we try to set the RTS High
2841 	 * Threshold to a reasonably high value respecting this 16-data
2842 	 * empirical rule when possible.
2843 	 */
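	/*
	 * For example (illustrative), a 32-data FIFO gives
	 * rts_high = max(32 / 2, 32 - 16) = 16 and
	 * rts_low  = max(32 / 4, 32 - 20) = 12.
	 */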
2844 	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2845 			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2846 	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2847 			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
2848 
2849 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2850 		 atmel_port->fifo_size);
2851 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2852 		atmel_port->rts_high);
2853 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2854 		atmel_port->rts_low);
2855 }
2856 
2857 static int atmel_serial_probe(struct platform_device *pdev)
2858 {
2859 	struct atmel_uart_port *atmel_port;
2860 	struct device_node *np = pdev->dev.parent->of_node;
2861 	void *data;
2862 	int ret;
2863 	bool rs485_enabled;
2864 
2865 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2866 
2867 	/*
2868 	 * In the device tree there is no node with "atmel,at91rm9200-usart-serial"
2869 	 * as compatible string. This driver is probed by the at91-usart MFD
2870 	 * driver, which is just a wrapper over the atmel_serial and
2871 	 * spi-at91-usart drivers. All attributes needed by this driver are
2872 	 * found in the of_node of the parent.
2873 	 */
2874 	pdev->dev.of_node = np;
2875 
2876 	ret = of_alias_get_id(np, "serial");
2877 	if (ret < 0)
2878 		/* port id not found in platform data or device-tree aliases:
2879 		 * auto-enumerate it */
2880 		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2881 
2882 	if (ret >= ATMEL_MAX_UART) {
2883 		ret = -ENODEV;
2884 		goto err;
2885 	}
2886 
2887 	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2888 		/* port already in use */
2889 		ret = -EBUSY;
2890 		goto err;
2891 	}
2892 
2893 	atmel_port = &atmel_ports[ret];
2894 	atmel_port->backup_imr = 0;
2895 	atmel_port->uart.line = ret;
2896 	atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
2897 	atmel_serial_probe_fifos(atmel_port, pdev);
2898 
2899 	atomic_set(&atmel_port->tasklet_shutdown, 0);
2900 	spin_lock_init(&atmel_port->lock_suspended);
2901 
2902 	ret = atmel_init_port(atmel_port, pdev);
2903 	if (ret)
2904 		goto err_clear_bit;
2905 
2906 	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2907 	if (IS_ERR(atmel_port->gpios)) {
2908 		ret = PTR_ERR(atmel_port->gpios);
2909 		goto err_clear_bit;
2910 	}
2911 
2912 	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2913 		ret = -ENOMEM;
2914 		data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
2915 				     sizeof(struct atmel_uart_char),
2916 				     GFP_KERNEL);
2917 		if (!data)
2918 			goto err_alloc_ring;
2919 		atmel_port->rx_ring.buf = data;
2920 	}
2921 
2922 	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2923 
2924 	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2925 	if (ret)
2926 		goto err_add_port;
2927 
2928 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2929 	if (uart_console(&atmel_port->uart)
2930 			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2931 		/*
2932 		 * The serial core enabled the clock for us, so undo
2933 		 * the clk_prepare_enable() in atmel_console_setup()
2934 		 */
2935 		clk_disable_unprepare(atmel_port->clk);
2936 	}
2937 #endif
2938 
2939 	device_init_wakeup(&pdev->dev, 1);
2940 	platform_set_drvdata(pdev, atmel_port);
2941 
2942 	/*
2943 	 * The peripheral clock has been disabled by atmel_init_port():
2944 	 * enable it before accessing I/O registers
2945 	 */
2946 	clk_prepare_enable(atmel_port->clk);
2947 
2948 	if (rs485_enabled) {
2949 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2950 				  ATMEL_US_USMODE_NORMAL);
2951 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2952 				  ATMEL_US_RTSEN);
2953 	}
2954 
2955 	/*
2956 	 * Get the IP name (usart or uart) of the port
2957 	 */
2958 	atmel_get_ip_name(&atmel_port->uart);
2959 
2960 	/*
2961 	 * The peripheral clock can now safely be disabled until the port
2962 	 * is used
2963 	 */
2964 	clk_disable_unprepare(atmel_port->clk);
2965 
2966 	return 0;
2967 
2968 err_add_port:
2969 	kfree(atmel_port->rx_ring.buf);
2970 	atmel_port->rx_ring.buf = NULL;
2971 err_alloc_ring:
2972 	if (!uart_console(&atmel_port->uart)) {
2973 		clk_put(atmel_port->clk);
2974 		atmel_port->clk = NULL;
2975 	}
2976 err_clear_bit:
2977 	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2978 err:
2979 	return ret;
2980 }
2981 
2982 /*
2983  * Even if the driver is not modular, it makes sense to be able to
2984  * unbind a device: there can be many bound devices, and there are
2985  * situations where dynamic binding and unbinding can be useful.
2986  *
2987  * For example, a connected device can require a specific firmware update
2988  * protocol that needs bitbanging on IO lines, but use the regular serial
2989  * port in the normal case.
2990  */
2991 static int atmel_serial_remove(struct platform_device *pdev)
2992 {
2993 	struct uart_port *port = platform_get_drvdata(pdev);
2994 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2995 	int ret = 0;
2996 
2997 	tasklet_kill(&atmel_port->tasklet_rx);
2998 	tasklet_kill(&atmel_port->tasklet_tx);
2999 
3000 	device_init_wakeup(&pdev->dev, 0);
3001 
3002 	ret = uart_remove_one_port(&atmel_uart, port);
3003 
3004 	kfree(atmel_port->rx_ring.buf);
3005 
3006 	/* "port" is allocated statically, so we shouldn't free it */
3007 
3008 	clear_bit(port->line, atmel_ports_in_use);
3009 
3010 	clk_put(atmel_port->clk);
3011 	atmel_port->clk = NULL;
3012 	pdev->dev.of_node = NULL;
3013 
3014 	return ret;
3015 }
3016 
3017 static struct platform_driver atmel_serial_driver = {
3018 	.probe		= atmel_serial_probe,
3019 	.remove		= atmel_serial_remove,
3020 	.suspend	= atmel_serial_suspend,
3021 	.resume		= atmel_serial_resume,
3022 	.driver		= {
3023 		.name			= "atmel_usart_serial",
3024 		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
3025 	},
3026 };
3027 
3028 static int __init atmel_serial_init(void)
3029 {
3030 	int ret;
3031 
3032 	ret = uart_register_driver(&atmel_uart);
3033 	if (ret)
3034 		return ret;
3035 
3036 	ret = platform_driver_register(&atmel_serial_driver);
3037 	if (ret)
3038 		uart_unregister_driver(&atmel_uart);
3039 
3040 	return ret;
3041 }
3042 device_initcall(atmel_serial_init);
3043