1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Driver for Atmel AT91 Serial ports
4  *  Copyright (C) 2003 Rick Bronson
5  *
6  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8  *
9  *  DMA support added by Chip Coldwell.
10  */
11 #include <linux/tty.h>
12 #include <linux/ioport.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/serial.h>
16 #include <linux/clk.h>
17 #include <linux/console.h>
18 #include <linux/sysrq.h>
19 #include <linux/tty_flip.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/of_device.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/atmel_pdc.h>
27 #include <linux/uaccess.h>
28 #include <linux/platform_data/atmel.h>
29 #include <linux/timer.h>
30 #include <linux/gpio.h>
31 #include <linux/gpio/consumer.h>
32 #include <linux/err.h>
33 #include <linux/irq.h>
34 #include <linux/suspend.h>
35 #include <linux/mm.h>
36 
37 #include <asm/io.h>
38 #include <asm/ioctls.h>
39 
40 #define PDC_BUFFER_SIZE		512
41 /* Revisit: We should calculate this based on the actual port settings */
42 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
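/*
 * The timeout value is expressed in bit periods: 3 characters of 10 bits
 * each (start + 8 data + stop), i.e. about 3 idle character times at the
 * current baud rate.
 */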
43 
44 /* The minimum number of data the FIFOs should be able to contain */
45 #define ATMEL_MIN_FIFO_SIZE	8
46 /*
47  * These two offsets are subtracted from the RX FIFO size to define the RTS
48  * high and low thresholds
49  */
50 #define ATMEL_RTS_HIGH_OFFSET	16
51 #define ATMEL_RTS_LOW_OFFSET	20
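/*
 * For example, with a 32-data deep RX FIFO these offsets would yield an RTS
 * high threshold of 32 - 16 = 16 and a low threshold of 32 - 20 = 12
 * (illustrative arithmetic only; the actual FIFO depth is SoC specific).
 */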
52 
53 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
54 #define SUPPORT_SYSRQ
55 #endif
56 
57 #include <linux/serial_core.h>
58 
59 #include "serial_mctrl_gpio.h"
60 #include "atmel_serial.h"
61 
62 static void atmel_start_rx(struct uart_port *port);
63 static void atmel_stop_rx(struct uart_port *port);
64 
65 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
66 
67 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
68  * are to coexist with the 8250 driver, such as if we have an external 16C550
69  * UART. */
70 #define SERIAL_ATMEL_MAJOR	204
71 #define MINOR_START		154
72 #define ATMEL_DEVICENAME	"ttyAT"
73 
74 #else
75 
76 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
77  * name, but it is legally reserved for the 8250 driver. */
78 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
79 #define MINOR_START		64
80 #define ATMEL_DEVICENAME	"ttyS"
81 
82 #endif
83 
84 #define ATMEL_ISR_PASS_LIMIT	256
85 
86 struct atmel_dma_buffer {
87 	unsigned char	*buf;
88 	dma_addr_t	dma_addr;
89 	unsigned int	dma_size;
90 	unsigned int	ofs;
91 };
92 
93 struct atmel_uart_char {
94 	u16		status;
95 	u16		ch;
96 };
97 
98 /*
99  * Be careful, the real size of the ring buffer is
100  * sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. This means the ring
101  * buffer can contain up to 1024 characters in PIO mode and up to 4096
102  * characters in DMA mode.
103  */
104 #define ATMEL_SERIAL_RINGSIZE 1024
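/*
 * sizeof(struct atmel_uart_char) is 4 bytes (two u16 fields, no padding), so
 * the ring buffer above occupies 1024 * 4 = 4096 bytes of memory.
 */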
105 
106 /*
107  * at91: 6 USARTs and one DBGU port (SAM9260)
108  * samx7: 3 USARTs and 5 UARTs
109  */
110 #define ATMEL_MAX_UART		8
111 
112 /*
113  * We wrap our port structure around the generic uart_port.
114  */
115 struct atmel_uart_port {
116 	struct uart_port	uart;		/* uart */
117 	struct clk		*clk;		/* uart clock */
118 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
119 	u32			backup_imr;	/* IMR saved during suspend */
120 	int			break_active;	/* break being received */
121 
122 	bool			use_dma_rx;	/* enable DMA receiver */
123 	bool			use_pdc_rx;	/* enable PDC receiver */
124 	short			pdc_rx_idx;	/* current PDC RX buffer */
125 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
126 
127 	bool			use_dma_tx;     /* enable DMA transmitter */
128 	bool			use_pdc_tx;	/* enable PDC transmitter */
129 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
130 
131 	spinlock_t			lock_tx;	/* port lock */
132 	spinlock_t			lock_rx;	/* port lock */
133 	struct dma_chan			*chan_tx;
134 	struct dma_chan			*chan_rx;
135 	struct dma_async_tx_descriptor	*desc_tx;
136 	struct dma_async_tx_descriptor	*desc_rx;
137 	dma_cookie_t			cookie_tx;
138 	dma_cookie_t			cookie_rx;
139 	struct scatterlist		sg_tx;
140 	struct scatterlist		sg_rx;
141 	struct tasklet_struct	tasklet_rx;
142 	struct tasklet_struct	tasklet_tx;
143 	atomic_t		tasklet_shutdown;
144 	unsigned int		irq_status_prev;
145 	unsigned int		tx_len;
146 
147 	struct circ_buf		rx_ring;
148 
149 	struct mctrl_gpios	*gpios;
150 	unsigned int		tx_done_mask;
151 	u32			fifo_size;
152 	u32			rts_high;
153 	u32			rts_low;
154 	bool			ms_irq_enabled;
155 	u32			rtor;	/* address of receiver timeout register if it exists */
156 	bool			has_frac_baudrate;
157 	bool			has_hw_timer;
158 	struct timer_list	uart_timer;
159 
160 	bool			tx_stopped;
161 	bool			suspended;
162 	unsigned int		pending;
163 	unsigned int		pending_status;
164 	spinlock_t		lock_suspended;
165 
166 #ifdef CONFIG_PM
167 	struct {
168 		u32		cr;
169 		u32		mr;
170 		u32		imr;
171 		u32		brgr;
172 		u32		rtor;
173 		u32		ttgr;
174 		u32		fmr;
175 		u32		fimr;
176 	} cache;
177 #endif
178 
179 	int (*prepare_rx)(struct uart_port *port);
180 	int (*prepare_tx)(struct uart_port *port);
181 	void (*schedule_rx)(struct uart_port *port);
182 	void (*schedule_tx)(struct uart_port *port);
183 	void (*release_rx)(struct uart_port *port);
184 	void (*release_tx)(struct uart_port *port);
185 };
186 
187 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
188 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
189 
190 #ifdef SUPPORT_SYSRQ
191 static struct console atmel_console;
192 #endif
193 
194 #if defined(CONFIG_OF)
195 static const struct of_device_id atmel_serial_dt_ids[] = {
196 	{ .compatible = "atmel,at91rm9200-usart" },
197 	{ .compatible = "atmel,at91sam9260-usart" },
198 	{ /* sentinel */ }
199 };
200 #endif
201 
202 static inline struct atmel_uart_port *
203 to_atmel_uart_port(struct uart_port *uart)
204 {
205 	return container_of(uart, struct atmel_uart_port, uart);
206 }
207 
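/*
 * Low-level accessors for the memory-mapped USART registers. The __raw_*
 * variants are used, so no byte swapping or memory barriers are implied.
 */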
208 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
209 {
210 	return __raw_readl(port->membase + reg);
211 }
212 
213 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
214 {
215 	__raw_writel(value, port->membase + reg);
216 }
217 
218 static inline u8 atmel_uart_read_char(struct uart_port *port)
219 {
220 	return __raw_readb(port->membase + ATMEL_US_RHR);
221 }
222 
223 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
224 {
225 	__raw_writeb(value, port->membase + ATMEL_US_THR);
226 }
227 
228 #ifdef CONFIG_SERIAL_ATMEL_PDC
229 static bool atmel_use_pdc_rx(struct uart_port *port)
230 {
231 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
232 
233 	return atmel_port->use_pdc_rx;
234 }
235 
236 static bool atmel_use_pdc_tx(struct uart_port *port)
237 {
238 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
239 
240 	return atmel_port->use_pdc_tx;
241 }
242 #else
243 static bool atmel_use_pdc_rx(struct uart_port *port)
244 {
245 	return false;
246 }
247 
248 static bool atmel_use_pdc_tx(struct uart_port *port)
249 {
250 	return false;
251 }
252 #endif
253 
254 static bool atmel_use_dma_tx(struct uart_port *port)
255 {
256 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
257 
258 	return atmel_port->use_dma_tx;
259 }
260 
261 static bool atmel_use_dma_rx(struct uart_port *port)
262 {
263 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
264 
265 	return atmel_port->use_dma_rx;
266 }
267 
268 static bool atmel_use_fifo(struct uart_port *port)
269 {
270 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
271 
272 	return atmel_port->fifo_size;
273 }
274 
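/* Schedule a tasklet unless the port is being shut down (see atmel_shutdown()). */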
275 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
276 				   struct tasklet_struct *t)
277 {
278 	if (!atomic_read(&atmel_port->tasklet_shutdown))
279 		tasklet_schedule(t);
280 }
281 
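/*
 * Read CSR and, for each modem-status line that is backed by a GPIO,
 * override the corresponding status bit with the GPIO state (the CSR bits
 * are set when the line is inactive).
 */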
282 static unsigned int atmel_get_lines_status(struct uart_port *port)
283 {
284 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
285 	unsigned int status, ret = 0;
286 
287 	status = atmel_uart_readl(port, ATMEL_US_CSR);
288 
289 	mctrl_gpio_get(atmel_port->gpios, &ret);
290 
291 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
292 						UART_GPIO_CTS))) {
293 		if (ret & TIOCM_CTS)
294 			status &= ~ATMEL_US_CTS;
295 		else
296 			status |= ATMEL_US_CTS;
297 	}
298 
299 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
300 						UART_GPIO_DSR))) {
301 		if (ret & TIOCM_DSR)
302 			status &= ~ATMEL_US_DSR;
303 		else
304 			status |= ATMEL_US_DSR;
305 	}
306 
307 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
308 						UART_GPIO_RI))) {
309 		if (ret & TIOCM_RI)
310 			status &= ~ATMEL_US_RI;
311 		else
312 			status |= ATMEL_US_RI;
313 	}
314 
315 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
316 						UART_GPIO_DCD))) {
317 		if (ret & TIOCM_CD)
318 			status &= ~ATMEL_US_DCD;
319 		else
320 			status |= ATMEL_US_DCD;
321 	}
322 
323 	return status;
324 }
325 
326 /* Enable or disable the rs485 support */
327 static int atmel_config_rs485(struct uart_port *port,
328 			      struct serial_rs485 *rs485conf)
329 {
330 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
331 	unsigned int mode;
332 
333 	/* Disable interrupts */
334 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
335 
336 	mode = atmel_uart_readl(port, ATMEL_US_MR);
337 
338 	/* Resetting serial mode to RS232 (0x0) */
339 	mode &= ~ATMEL_US_USMODE;
340 
341 	port->rs485 = *rs485conf;
342 
343 	if (rs485conf->flags & SER_RS485_ENABLED) {
344 		dev_dbg(port->dev, "Setting UART to RS485\n");
345 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
346 		atmel_uart_writel(port, ATMEL_US_TTGR,
347 				  rs485conf->delay_rts_after_send);
348 		mode |= ATMEL_US_USMODE_RS485;
349 	} else {
350 		dev_dbg(port->dev, "Setting UART to RS232\n");
351 		if (atmel_use_pdc_tx(port))
352 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
353 				ATMEL_US_TXBUFE;
354 		else
355 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
356 	}
357 	atmel_uart_writel(port, ATMEL_US_MR, mode);
358 
359 	/* Enable interrupts */
360 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
361 
362 	return 0;
363 }
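/*
 * Illustrative (untested) userspace sketch for switching a port to RS485
 * mode through the standard serial_rs485 ioctl interface:
 *
 *	struct serial_rs485 conf = { .flags = SER_RS485_ENABLED };
 *	conf.delay_rts_after_send = 1;
 *	if (ioctl(fd, TIOCSRS485, &conf) < 0)
 *		perror("TIOCSRS485");
 *
 * which ends up in atmel_config_rs485() above via the serial core.
 */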
364 
365 /*
366  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
367  */
368 static u_int atmel_tx_empty(struct uart_port *port)
369 {
370 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
371 
372 	if (atmel_port->tx_stopped)
373 		return TIOCSER_TEMT;
374 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
375 		TIOCSER_TEMT :
376 		0;
377 }
378 
379 /*
380  * Set state of the modem control output lines
381  */
382 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
383 {
384 	unsigned int control = 0;
385 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
386 	unsigned int rts_paused, rts_ready;
387 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
388 
389 	/* override mode to RS485 if needed, otherwise keep the current mode */
390 	if (port->rs485.flags & SER_RS485_ENABLED) {
391 		atmel_uart_writel(port, ATMEL_US_TTGR,
392 				  port->rs485.delay_rts_after_send);
393 		mode &= ~ATMEL_US_USMODE;
394 		mode |= ATMEL_US_USMODE_RS485;
395 	}
396 
397 	/* set the RTS line state according to the mode */
398 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
399 		/* force RTS line to high level */
400 		rts_paused = ATMEL_US_RTSEN;
401 
402 		/* give the control of the RTS line back to the hardware */
403 		rts_ready = ATMEL_US_RTSDIS;
404 	} else {
405 		/* force RTS line to high level */
406 		rts_paused = ATMEL_US_RTSDIS;
407 
408 		/* force RTS line to low level */
409 		rts_ready = ATMEL_US_RTSEN;
410 	}
411 
412 	if (mctrl & TIOCM_RTS)
413 		control |= rts_ready;
414 	else
415 		control |= rts_paused;
416 
417 	if (mctrl & TIOCM_DTR)
418 		control |= ATMEL_US_DTREN;
419 	else
420 		control |= ATMEL_US_DTRDIS;
421 
422 	atmel_uart_writel(port, ATMEL_US_CR, control);
423 
424 	mctrl_gpio_set(atmel_port->gpios, mctrl);
425 
426 	/* Local loopback mode? */
427 	mode &= ~ATMEL_US_CHMODE;
428 	if (mctrl & TIOCM_LOOP)
429 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
430 	else
431 		mode |= ATMEL_US_CHMODE_NORMAL;
432 
433 	atmel_uart_writel(port, ATMEL_US_MR, mode);
434 }
435 
436 /*
437  * Get state of the modem control input lines
438  */
439 static u_int atmel_get_mctrl(struct uart_port *port)
440 {
441 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
442 	unsigned int ret = 0, status;
443 
444 	status = atmel_uart_readl(port, ATMEL_US_CSR);
445 
446 	/*
447 	 * The control signals are active low.
448 	 */
449 	if (!(status & ATMEL_US_DCD))
450 		ret |= TIOCM_CD;
451 	if (!(status & ATMEL_US_CTS))
452 		ret |= TIOCM_CTS;
453 	if (!(status & ATMEL_US_DSR))
454 		ret |= TIOCM_DSR;
455 	if (!(status & ATMEL_US_RI))
456 		ret |= TIOCM_RI;
457 
458 	return mctrl_gpio_get(atmel_port->gpios, &ret);
459 }
460 
461 /*
462  * Stop transmitting.
463  */
464 static void atmel_stop_tx(struct uart_port *port)
465 {
466 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
467 
468 	if (atmel_use_pdc_tx(port)) {
469 		/* disable PDC transmit */
470 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
471 	}
472 
473 	/*
474 	 * Disable the transmitter.
475 	 * This is mandatory when DMA is used: otherwise, the data already
476 	 * handed over to the DMA would still be transmitted in full.
477 	 */
478 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
479 	atmel_port->tx_stopped = true;
480 
481 	/* Disable interrupts */
482 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
483 
484 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
485 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
486 		atmel_start_rx(port);
487 }
488 
489 /*
490  * Start transmitting.
491  */
492 static void atmel_start_tx(struct uart_port *port)
493 {
494 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
495 
496 	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
497 				       & ATMEL_PDC_TXTEN))
498 		/* The transmitter is already running.  Yes, we
499 		   really need this.*/
500 		return;
501 
502 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
503 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
504 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
505 			atmel_stop_rx(port);
506 
507 	if (atmel_use_pdc_tx(port))
508 		/* re-enable PDC transmit */
509 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
510 
511 	/* Enable interrupts */
512 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
513 
514 	/* re-enable the transmitter */
515 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
516 	atmel_port->tx_stopped = false;
517 }
518 
519 /*
520  * start receiving - port is in process of being opened.
521  */
522 static void atmel_start_rx(struct uart_port *port)
523 {
524 	/* reset status and receiver */
525 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
526 
527 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
528 
529 	if (atmel_use_pdc_rx(port)) {
530 		/* enable PDC controller */
531 		atmel_uart_writel(port, ATMEL_US_IER,
532 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
533 				  port->read_status_mask);
534 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
535 	} else {
536 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
537 	}
538 }
539 
540 /*
541  * Stop receiving - port is in process of being closed.
542  */
543 static void atmel_stop_rx(struct uart_port *port)
544 {
545 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
546 
547 	if (atmel_use_pdc_rx(port)) {
548 		/* disable PDC receive */
549 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
550 		atmel_uart_writel(port, ATMEL_US_IDR,
551 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
552 				  port->read_status_mask);
553 	} else {
554 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
555 	}
556 }
557 
558 /*
559  * Enable modem status interrupts
560  */
561 static void atmel_enable_ms(struct uart_port *port)
562 {
563 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
564 	uint32_t ier = 0;
565 
566 	/*
567 	 * Interrupt should not be enabled twice
568 	 */
569 	if (atmel_port->ms_irq_enabled)
570 		return;
571 
572 	atmel_port->ms_irq_enabled = true;
573 
574 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
575 		ier |= ATMEL_US_CTSIC;
576 
577 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
578 		ier |= ATMEL_US_DSRIC;
579 
580 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
581 		ier |= ATMEL_US_RIIC;
582 
583 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
584 		ier |= ATMEL_US_DCDIC;
585 
586 	atmel_uart_writel(port, ATMEL_US_IER, ier);
587 
588 	mctrl_gpio_enable_ms(atmel_port->gpios);
589 }
590 
591 /*
592  * Disable modem status interrupts
593  */
594 static void atmel_disable_ms(struct uart_port *port)
595 {
596 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
597 	uint32_t idr = 0;
598 
599 	/*
600 	 * Interrupt should not be disabled twice
601 	 */
602 	if (!atmel_port->ms_irq_enabled)
603 		return;
604 
605 	atmel_port->ms_irq_enabled = false;
606 
607 	mctrl_gpio_disable_ms(atmel_port->gpios);
608 
609 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
610 		idr |= ATMEL_US_CTSIC;
611 
612 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
613 		idr |= ATMEL_US_DSRIC;
614 
615 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
616 		idr |= ATMEL_US_RIIC;
617 
618 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
619 		idr |= ATMEL_US_DCDIC;
620 
621 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
622 }
623 
624 /*
625  * Control the transmission of a break signal
626  */
627 static void atmel_break_ctl(struct uart_port *port, int break_state)
628 {
629 	if (break_state != 0)
630 		/* start break */
631 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
632 	else
633 		/* stop break */
634 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
635 }
636 
637 /*
638  * Stores the incoming character in the ring buffer
639  */
640 static void
641 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
642 		     unsigned int ch)
643 {
644 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
645 	struct circ_buf *ring = &atmel_port->rx_ring;
646 	struct atmel_uart_char *c;
647 
648 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
649 		/* Buffer overflow, ignore char */
650 		return;
651 
652 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
653 	c->status	= status;
654 	c->ch		= ch;
655 
656 	/* Make sure the character is stored before we update head. */
657 	smp_wmb();
658 
659 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
660 }
661 
662 /*
663  * Deal with parity, framing and overrun errors.
664  */
665 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
666 {
667 	/* clear error */
668 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
669 
670 	if (status & ATMEL_US_RXBRK) {
671 		/* ignore side-effect */
672 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
673 		port->icount.brk++;
674 	}
675 	if (status & ATMEL_US_PARE)
676 		port->icount.parity++;
677 	if (status & ATMEL_US_FRAME)
678 		port->icount.frame++;
679 	if (status & ATMEL_US_OVRE)
680 		port->icount.overrun++;
681 }
682 
683 /*
684  * Characters received (called from interrupt handler)
685  */
686 static void atmel_rx_chars(struct uart_port *port)
687 {
688 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
689 	unsigned int status, ch;
690 
691 	status = atmel_uart_readl(port, ATMEL_US_CSR);
692 	while (status & ATMEL_US_RXRDY) {
693 		ch = atmel_uart_read_char(port);
694 
695 		/*
696 		 * note that the error handling code is
697 		 * out of the main execution path
698 		 */
699 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
700 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
701 			     || atmel_port->break_active)) {
702 
703 			/* clear error */
704 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
705 
706 			if (status & ATMEL_US_RXBRK
707 			    && !atmel_port->break_active) {
708 				atmel_port->break_active = 1;
709 				atmel_uart_writel(port, ATMEL_US_IER,
710 						  ATMEL_US_RXBRK);
711 			} else {
712 				/*
713 				 * This is either the end-of-break
714 				 * condition or we've received at
715 				 * least one character without RXBRK
716 				 * being set. In both cases, the next
717 				 * RXBRK will indicate start-of-break.
718 				 */
719 				atmel_uart_writel(port, ATMEL_US_IDR,
720 						  ATMEL_US_RXBRK);
721 				status &= ~ATMEL_US_RXBRK;
722 				atmel_port->break_active = 0;
723 			}
724 		}
725 
726 		atmel_buffer_rx_char(port, status, ch);
727 		status = atmel_uart_readl(port, ATMEL_US_CSR);
728 	}
729 
730 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
731 }
732 
733 /*
734  * Transmit characters (called from tasklet with TXRDY interrupt
735  * disabled)
736  */
737 static void atmel_tx_chars(struct uart_port *port)
738 {
739 	struct circ_buf *xmit = &port->state->xmit;
740 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
741 
742 	if (port->x_char &&
743 	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
744 		atmel_uart_write_char(port, port->x_char);
745 		port->icount.tx++;
746 		port->x_char = 0;
747 	}
748 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
749 		return;
750 
751 	while (atmel_uart_readl(port, ATMEL_US_CSR) &
752 	       atmel_port->tx_done_mask) {
753 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
754 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
755 		port->icount.tx++;
756 		if (uart_circ_empty(xmit))
757 			break;
758 	}
759 
760 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
761 		uart_write_wakeup(port);
762 
763 	if (!uart_circ_empty(xmit))
764 		/* Enable interrupts */
765 		atmel_uart_writel(port, ATMEL_US_IER,
766 				  atmel_port->tx_done_mask);
767 }
768 
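/*
 * DMA TX completion callback: advance the circular buffer tail by the amount
 * that was just sent, release the descriptor and either schedule the next
 * chunk or, for half-duplex RS485, turn the receiver back on.
 */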
769 static void atmel_complete_tx_dma(void *arg)
770 {
771 	struct atmel_uart_port *atmel_port = arg;
772 	struct uart_port *port = &atmel_port->uart;
773 	struct circ_buf *xmit = &port->state->xmit;
774 	struct dma_chan *chan = atmel_port->chan_tx;
775 	unsigned long flags;
776 
777 	spin_lock_irqsave(&port->lock, flags);
778 
779 	if (chan)
780 		dmaengine_terminate_all(chan);
781 	xmit->tail += atmel_port->tx_len;
782 	xmit->tail &= UART_XMIT_SIZE - 1;
783 
784 	port->icount.tx += atmel_port->tx_len;
785 
786 	spin_lock_irq(&atmel_port->lock_tx);
787 	async_tx_ack(atmel_port->desc_tx);
788 	atmel_port->cookie_tx = -EINVAL;
789 	atmel_port->desc_tx = NULL;
790 	spin_unlock_irq(&atmel_port->lock_tx);
791 
792 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
793 		uart_write_wakeup(port);
794 
795 	/*
796 	 * xmit is a circular buffer so, if we have just sent data from
797 	 * xmit->tail to the end of xmit->buf, now we have to transmit the
798 	 * remaining data from the beginning of xmit->buf to xmit->head.
799 	 */
800 	if (!uart_circ_empty(xmit))
801 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
802 	else if ((port->rs485.flags & SER_RS485_ENABLED) &&
803 		 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
804 		/* DMA done, stop TX, start RX for RS485 */
805 		atmel_start_rx(port);
806 	}
807 
808 	spin_unlock_irqrestore(&port->lock, flags);
809 }
810 
811 static void atmel_release_tx_dma(struct uart_port *port)
812 {
813 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
814 	struct dma_chan *chan = atmel_port->chan_tx;
815 
816 	if (chan) {
817 		dmaengine_terminate_all(chan);
818 		dma_release_channel(chan);
819 		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
820 				DMA_TO_DEVICE);
821 	}
822 
823 	atmel_port->desc_tx = NULL;
824 	atmel_port->chan_tx = NULL;
825 	atmel_port->cookie_tx = -EINVAL;
826 }
827 
828 /*
829  * Called from tasklet with the TXRDY interrupt disabled.
830  */
831 static void atmel_tx_dma(struct uart_port *port)
832 {
833 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
834 	struct circ_buf *xmit = &port->state->xmit;
835 	struct dma_chan *chan = atmel_port->chan_tx;
836 	struct dma_async_tx_descriptor *desc;
837 	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
838 	unsigned int tx_len, part1_len, part2_len, sg_len;
839 	dma_addr_t phys_addr;
840 
841 	/* Make sure we have an idle channel */
842 	if (atmel_port->desc_tx != NULL)
843 		return;
844 
845 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
846 		/*
847 		 * DMA is idle now.
848 		 * Port xmit buffer is already mapped,
849 		 * and it is one page... Just adjust
850 		 * offsets and lengths. Since it is a circular buffer,
851 		 * we have to transmit till the end, and then the rest.
852 		 * Take the port lock to get a
853 		 * consistent xmit buffer state.
854 		 */
855 		tx_len = CIRC_CNT_TO_END(xmit->head,
856 					 xmit->tail,
857 					 UART_XMIT_SIZE);
858 
859 		if (atmel_port->fifo_size) {
860 			/* multi data mode */
861 			part1_len = (tx_len & ~0x3); /* DWORD access */
862 			part2_len = (tx_len & 0x3); /* BYTE access */
863 		} else {
864 			/* single data (legacy) mode */
865 			part1_len = 0;
866 			part2_len = tx_len; /* BYTE access only */
867 		}
868 
869 		sg_init_table(sgl, 2);
870 		sg_len = 0;
871 		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
872 		if (part1_len) {
873 			sg = &sgl[sg_len++];
874 			sg_dma_address(sg) = phys_addr;
875 			sg_dma_len(sg) = part1_len;
876 
877 			phys_addr += part1_len;
878 		}
879 
880 		if (part2_len) {
881 			sg = &sgl[sg_len++];
882 			sg_dma_address(sg) = phys_addr;
883 			sg_dma_len(sg) = part2_len;
884 		}
885 
886 		/*
887 		 * save tx_len so atmel_complete_tx_dma() will increase
888 		 * xmit->tail correctly
889 		 */
890 		atmel_port->tx_len = tx_len;
891 
892 		desc = dmaengine_prep_slave_sg(chan,
893 					       sgl,
894 					       sg_len,
895 					       DMA_MEM_TO_DEV,
896 					       DMA_PREP_INTERRUPT |
897 					       DMA_CTRL_ACK);
898 		if (!desc) {
899 			dev_err(port->dev, "Failed to send via dma!\n");
900 			return;
901 		}
902 
903 		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
904 
905 		atmel_port->desc_tx = desc;
906 		desc->callback = atmel_complete_tx_dma;
907 		desc->callback_param = atmel_port;
908 		atmel_port->cookie_tx = dmaengine_submit(desc);
909 	}
910 
911 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
912 		uart_write_wakeup(port);
913 }
914 
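/*
 * Allocate the TX DMA channel and map the (page-sized) xmit circular buffer
 * once as a single scatterlist entry; atmel_tx_dma() only adjusts offsets
 * and lengths within that mapping.
 */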
915 static int atmel_prepare_tx_dma(struct uart_port *port)
916 {
917 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
918 	dma_cap_mask_t		mask;
919 	struct dma_slave_config config;
920 	int ret, nent;
921 
922 	dma_cap_zero(mask);
923 	dma_cap_set(DMA_SLAVE, mask);
924 
925 	atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
926 	if (atmel_port->chan_tx == NULL)
927 		goto chan_err;
928 	dev_info(port->dev, "using %s for tx DMA transfers\n",
929 		dma_chan_name(atmel_port->chan_tx));
930 
931 	spin_lock_init(&atmel_port->lock_tx);
932 	sg_init_table(&atmel_port->sg_tx, 1);
933 	/* UART circular tx buffer is an aligned page. */
934 	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
935 	sg_set_page(&atmel_port->sg_tx,
936 			virt_to_page(port->state->xmit.buf),
937 			UART_XMIT_SIZE,
938 			offset_in_page(port->state->xmit.buf));
939 	nent = dma_map_sg(port->dev,
940 				&atmel_port->sg_tx,
941 				1,
942 				DMA_TO_DEVICE);
943 
944 	if (!nent) {
945 		dev_dbg(port->dev, "need to release resource of dma\n");
946 		goto chan_err;
947 	} else {
948 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
949 			sg_dma_len(&atmel_port->sg_tx),
950 			port->state->xmit.buf,
951 			&sg_dma_address(&atmel_port->sg_tx));
952 	}
953 
954 	/* Configure the slave DMA */
955 	memset(&config, 0, sizeof(config));
956 	config.direction = DMA_MEM_TO_DEV;
957 	config.dst_addr_width = (atmel_port->fifo_size) ?
958 				DMA_SLAVE_BUSWIDTH_4_BYTES :
959 				DMA_SLAVE_BUSWIDTH_1_BYTE;
960 	config.dst_addr = port->mapbase + ATMEL_US_THR;
961 	config.dst_maxburst = 1;
962 
963 	ret = dmaengine_slave_config(atmel_port->chan_tx,
964 				     &config);
965 	if (ret) {
966 		dev_err(port->dev, "DMA tx slave configuration failed\n");
967 		goto chan_err;
968 	}
969 
970 	return 0;
971 
972 chan_err:
973 	dev_err(port->dev, "TX channel not available, switch to pio\n");
974 	atmel_port->use_dma_tx = 0;
975 	if (atmel_port->chan_tx)
976 		atmel_release_tx_dma(port);
977 	return -EINVAL;
978 }
979 
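/* RX DMA period elapsed: defer the actual data handling to the RX tasklet. */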
980 static void atmel_complete_rx_dma(void *arg)
981 {
982 	struct uart_port *port = arg;
983 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
984 
985 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
986 }
987 
988 static void atmel_release_rx_dma(struct uart_port *port)
989 {
990 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
991 	struct dma_chan *chan = atmel_port->chan_rx;
992 
993 	if (chan) {
994 		dmaengine_terminate_all(chan);
995 		dma_release_channel(chan);
996 		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
997 				DMA_FROM_DEVICE);
998 	}
999 
1000 	atmel_port->desc_rx = NULL;
1001 	atmel_port->chan_rx = NULL;
1002 	atmel_port->cookie_rx = -EINVAL;
1003 }
1004 
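/*
 * Called from the RX tasklet: compute how far the cyclic DMA transfer has
 * progressed from the reported residue and push the newly received bytes
 * to the tty layer.
 */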
1005 static void atmel_rx_from_dma(struct uart_port *port)
1006 {
1007 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1008 	struct tty_port *tport = &port->state->port;
1009 	struct circ_buf *ring = &atmel_port->rx_ring;
1010 	struct dma_chan *chan = atmel_port->chan_rx;
1011 	struct dma_tx_state state;
1012 	enum dma_status dmastat;
1013 	size_t count;
1014 
1016 	/* Reset the UART timeout early so that we don't miss one */
1017 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1018 	dmastat = dmaengine_tx_status(chan,
1019 				atmel_port->cookie_rx,
1020 				&state);
1021 	/* Reschedule the tasklet if the DMA engine reports an error status */
1022 	if (dmastat == DMA_ERROR) {
1023 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1024 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1025 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1026 		return;
1027 	}
1028 
1029 	/* CPU claims ownership of RX DMA buffer */
1030 	dma_sync_sg_for_cpu(port->dev,
1031 			    &atmel_port->sg_rx,
1032 			    1,
1033 			    DMA_FROM_DEVICE);
1034 
1035 	/*
1036 	 * ring->head points to the end of data already written by the DMA.
1037 	 * ring->tail points to the beginning of data to be read by the
1038 	 * framework.
1039 	 * The current transfer size should not be larger than the dma buffer
1040 	 * length.
1041 	 */
1042 	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1043 	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1044 	/*
1045 	 * At this point ring->head may point to the first byte right after the
1046 	 * last byte of the dma buffer:
1047 	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1048 	 *
1049 	 * However, ring->tail must always point inside the dma buffer:
1050 	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1051 	 *
1052 	 * Since we use a ring buffer, we have to handle the case
1053 	 * where head is lower than tail. In such a case, we first read from
1054 	 * tail to the end of the buffer then reset tail.
1055 	 */
1056 	if (ring->head < ring->tail) {
1057 		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1058 
1059 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1060 		ring->tail = 0;
1061 		port->icount.rx += count;
1062 	}
1063 
1064 	/* Finally we read data from tail to head */
1065 	if (ring->tail < ring->head) {
1066 		count = ring->head - ring->tail;
1067 
1068 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1069 		/* Wrap ring->head if needed */
1070 		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1071 			ring->head = 0;
1072 		ring->tail = ring->head;
1073 		port->icount.rx += count;
1074 	}
1075 
1076 	/* USART retrieves ownership of RX DMA buffer */
1077 	dma_sync_sg_for_device(port->dev,
1078 			       &atmel_port->sg_rx,
1079 			       1,
1080 			       DMA_FROM_DEVICE);
1081 
1082 	/*
1083 	 * Drop the lock here since it might end up calling
1084 	 * uart_start(), which takes the lock.
1085 	 */
1086 	spin_unlock(&port->lock);
1087 	tty_flip_buffer_push(tport);
1088 	spin_lock(&port->lock);
1089 
1090 	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1091 }
1092 
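/*
 * Set up a cyclic DMA transfer targeting the rx_ring buffer; the transfer
 * raises an interrupt every half buffer, and the data is consumed in
 * atmel_rx_from_dma().
 */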
1093 static int atmel_prepare_rx_dma(struct uart_port *port)
1094 {
1095 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1096 	struct dma_async_tx_descriptor *desc;
1097 	dma_cap_mask_t		mask;
1098 	struct dma_slave_config config;
1099 	struct circ_buf		*ring;
1100 	int ret, nent;
1101 
1102 	ring = &atmel_port->rx_ring;
1103 
1104 	dma_cap_zero(mask);
1105 	dma_cap_set(DMA_CYCLIC, mask);
1106 
1107 	atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
1108 	if (atmel_port->chan_rx == NULL)
1109 		goto chan_err;
1110 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1111 		dma_chan_name(atmel_port->chan_rx));
1112 
1113 	spin_lock_init(&atmel_port->lock_rx);
1114 	sg_init_table(&atmel_port->sg_rx, 1);
1115 	/* UART circular rx buffer is an aligned page. */
1116 	BUG_ON(!PAGE_ALIGNED(ring->buf));
1117 	sg_set_page(&atmel_port->sg_rx,
1118 		    virt_to_page(ring->buf),
1119 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1120 		    offset_in_page(ring->buf));
1121 	nent = dma_map_sg(port->dev,
1122 			  &atmel_port->sg_rx,
1123 			  1,
1124 			  DMA_FROM_DEVICE);
1125 
1126 	if (!nent) {
1127 		dev_dbg(port->dev, "need to release resource of dma\n");
1128 		goto chan_err;
1129 	} else {
1130 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1131 			sg_dma_len(&atmel_port->sg_rx),
1132 			ring->buf,
1133 			&sg_dma_address(&atmel_port->sg_rx));
1134 	}
1135 
1136 	/* Configure the slave DMA */
1137 	memset(&config, 0, sizeof(config));
1138 	config.direction = DMA_DEV_TO_MEM;
1139 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1140 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1141 	config.src_maxburst = 1;
1142 
1143 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1144 				     &config);
1145 	if (ret) {
1146 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1147 		goto chan_err;
1148 	}
1149 	/*
1150 	 * Prepare a cyclic dma transfer with two periods, each one
1151 	 * being half the ring buffer size
1152 	 */
1153 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1154 					 sg_dma_address(&atmel_port->sg_rx),
1155 					 sg_dma_len(&atmel_port->sg_rx),
1156 					 sg_dma_len(&atmel_port->sg_rx)/2,
1157 					 DMA_DEV_TO_MEM,
1158 					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "Preparing DMA cyclic failed\n");
		goto chan_err;
	}
1159 	desc->callback = atmel_complete_rx_dma;
1160 	desc->callback_param = port;
1161 	atmel_port->desc_rx = desc;
1162 	atmel_port->cookie_rx = dmaengine_submit(desc);
1163 
1164 	return 0;
1165 
1166 chan_err:
1167 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1168 	atmel_port->use_dma_rx = 0;
1169 	if (atmel_port->chan_rx)
1170 		atmel_release_rx_dma(port);
1171 	return -EINVAL;
1172 }
1173 
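/*
 * Polling timer used when the IP has no hardware receiver timeout: it kicks
 * the RX tasklet periodically and re-arms itself until shutdown.
 */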
1174 static void atmel_uart_timer_callback(struct timer_list *t)
1175 {
1176 	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1177 							uart_timer);
1178 	struct uart_port *port = &atmel_port->uart;
1179 
1180 	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1181 		tasklet_schedule(&atmel_port->tasklet_rx);
1182 		mod_timer(&atmel_port->uart_timer,
1183 			  jiffies + uart_poll_timeout(port));
1184 	}
1185 }
1186 
1187 /*
1188  * receive interrupt handler.
1189  */
1190 static void
1191 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1192 {
1193 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1194 
1195 	if (atmel_use_pdc_rx(port)) {
1196 		/*
1197 		 * PDC receive. Just schedule the tasklet and let it
1198 		 * figure out the details.
1199 		 *
1200 		 * TODO: We're not handling error flags correctly at
1201 		 * the moment.
1202 		 */
1203 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1204 			atmel_uart_writel(port, ATMEL_US_IDR,
1205 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1206 			atmel_tasklet_schedule(atmel_port,
1207 					       &atmel_port->tasklet_rx);
1208 		}
1209 
1210 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1211 				ATMEL_US_FRAME | ATMEL_US_PARE))
1212 			atmel_pdc_rxerr(port, pending);
1213 	}
1214 
1215 	if (atmel_use_dma_rx(port)) {
1216 		if (pending & ATMEL_US_TIMEOUT) {
1217 			atmel_uart_writel(port, ATMEL_US_IDR,
1218 					  ATMEL_US_TIMEOUT);
1219 			atmel_tasklet_schedule(atmel_port,
1220 					       &atmel_port->tasklet_rx);
1221 		}
1222 	}
1223 
1224 	/* Interrupt receive */
1225 	if (pending & ATMEL_US_RXRDY)
1226 		atmel_rx_chars(port);
1227 	else if (pending & ATMEL_US_RXBRK) {
1228 		/*
1229 		 * End of break detected. If it came along with a
1230 		 * character, atmel_rx_chars will handle it.
1231 		 */
1232 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1233 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1234 		atmel_port->break_active = 0;
1235 	}
1236 }
1237 
1238 /*
1239  * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1240  */
1241 static void
1242 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1243 {
1244 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1245 
1246 	if (pending & atmel_port->tx_done_mask) {
1247 		/* Either PDC or interrupt transmission */
1248 		atmel_uart_writel(port, ATMEL_US_IDR,
1249 				  atmel_port->tx_done_mask);
1250 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1251 	}
1252 }
1253 
1254 /*
1255  * status flags interrupt handler.
1256  */
1257 static void
1258 atmel_handle_status(struct uart_port *port, unsigned int pending,
1259 		    unsigned int status)
1260 {
1261 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1262 	unsigned int status_change;
1263 
1264 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1265 				| ATMEL_US_CTSIC)) {
1266 		status_change = status ^ atmel_port->irq_status_prev;
1267 		atmel_port->irq_status_prev = status;
1268 
1269 		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1270 					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1271 			/* TODO: All reads to CSR will clear these interrupts! */
1272 			if (status_change & ATMEL_US_RI)
1273 				port->icount.rng++;
1274 			if (status_change & ATMEL_US_DSR)
1275 				port->icount.dsr++;
1276 			if (status_change & ATMEL_US_DCD)
1277 				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1278 			if (status_change & ATMEL_US_CTS)
1279 				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1280 
1281 			wake_up_interruptible(&port->state->port.delta_msr_wait);
1282 		}
1283 	}
1284 }
1285 
1286 /*
1287  * Interrupt handler
1288  */
1289 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1290 {
1291 	struct uart_port *port = dev_id;
1292 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1293 	unsigned int status, pending, mask, pass_counter = 0;
1294 
1295 	spin_lock(&atmel_port->lock_suspended);
1296 
1297 	do {
1298 		status = atmel_get_lines_status(port);
1299 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1300 		pending = status & mask;
1301 		if (!pending)
1302 			break;
1303 
1304 		if (atmel_port->suspended) {
1305 			atmel_port->pending |= pending;
1306 			atmel_port->pending_status = status;
1307 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1308 			pm_system_wakeup();
1309 			break;
1310 		}
1311 
1312 		atmel_handle_receive(port, pending);
1313 		atmel_handle_status(port, pending, status);
1314 		atmel_handle_transmit(port, pending);
1315 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1316 
1317 	spin_unlock(&atmel_port->lock_suspended);
1318 
1319 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1320 }
1321 
1322 static void atmel_release_tx_pdc(struct uart_port *port)
1323 {
1324 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1325 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1326 
1327 	dma_unmap_single(port->dev,
1328 			 pdc->dma_addr,
1329 			 pdc->dma_size,
1330 			 DMA_TO_DEVICE);
1331 }
1332 
1333 /*
1334  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1335  */
1336 static void atmel_tx_pdc(struct uart_port *port)
1337 {
1338 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1339 	struct circ_buf *xmit = &port->state->xmit;
1340 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1341 	int count;
1342 
1343 	/* nothing left to transmit? */
1344 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1345 		return;
1346 
1347 	xmit->tail += pdc->ofs;
1348 	xmit->tail &= UART_XMIT_SIZE - 1;
1349 
1350 	port->icount.tx += pdc->ofs;
1351 	pdc->ofs = 0;
1352 
1353 	/* more to transmit - setup next transfer */
1354 
1355 	/* disable PDC transmit */
1356 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1357 
1358 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1359 		dma_sync_single_for_device(port->dev,
1360 					   pdc->dma_addr,
1361 					   pdc->dma_size,
1362 					   DMA_TO_DEVICE);
1363 
1364 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1365 		pdc->ofs = count;
1366 
1367 		atmel_uart_writel(port, ATMEL_PDC_TPR,
1368 				  pdc->dma_addr + xmit->tail);
1369 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1370 		/* re-enable PDC transmit */
1371 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1372 		/* Enable interrupts */
1373 		atmel_uart_writel(port, ATMEL_US_IER,
1374 				  atmel_port->tx_done_mask);
1375 	} else {
1376 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
1377 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
1378 			/* DMA done, stop TX, start RX for RS485 */
1379 			atmel_start_rx(port);
1380 		}
1381 	}
1382 
1383 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1384 		uart_write_wakeup(port);
1385 }
1386 
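/*
 * Map the whole xmit circular buffer once for the PDC; atmel_tx_pdc() then
 * programs the transmit pointer/counter registers relative to this mapping.
 */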
1387 static int atmel_prepare_tx_pdc(struct uart_port *port)
1388 {
1389 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1390 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1391 	struct circ_buf *xmit = &port->state->xmit;
1392 
1393 	pdc->buf = xmit->buf;
1394 	pdc->dma_addr = dma_map_single(port->dev,
1395 					pdc->buf,
1396 					UART_XMIT_SIZE,
1397 					DMA_TO_DEVICE);
1398 	pdc->dma_size = UART_XMIT_SIZE;
1399 	pdc->ofs = 0;
1400 
1401 	return 0;
1402 }
1403 
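/*
 * PIO reception path: drain the software ring filled by atmel_rx_chars() in
 * interrupt context, translate the per-character status flags and hand the
 * characters to the tty layer.
 */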
1404 static void atmel_rx_from_ring(struct uart_port *port)
1405 {
1406 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1407 	struct circ_buf *ring = &atmel_port->rx_ring;
1408 	unsigned int flg;
1409 	unsigned int status;
1410 
1411 	while (ring->head != ring->tail) {
1412 		struct atmel_uart_char c;
1413 
1414 		/* Make sure c is loaded after head. */
1415 		smp_rmb();
1416 
1417 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1418 
1419 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1420 
1421 		port->icount.rx++;
1422 		status = c.status;
1423 		flg = TTY_NORMAL;
1424 
1425 		/*
1426 		 * note that the error handling code is
1427 		 * out of the main execution path
1428 		 */
1429 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1430 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1431 			if (status & ATMEL_US_RXBRK) {
1432 				/* ignore side-effect */
1433 				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1434 
1435 				port->icount.brk++;
1436 				if (uart_handle_break(port))
1437 					continue;
1438 			}
1439 			if (status & ATMEL_US_PARE)
1440 				port->icount.parity++;
1441 			if (status & ATMEL_US_FRAME)
1442 				port->icount.frame++;
1443 			if (status & ATMEL_US_OVRE)
1444 				port->icount.overrun++;
1445 
1446 			status &= port->read_status_mask;
1447 
1448 			if (status & ATMEL_US_RXBRK)
1449 				flg = TTY_BREAK;
1450 			else if (status & ATMEL_US_PARE)
1451 				flg = TTY_PARITY;
1452 			else if (status & ATMEL_US_FRAME)
1453 				flg = TTY_FRAME;
1454 		}
1455 
1456 
1457 		if (uart_handle_sysrq_char(port, c.ch))
1458 			continue;
1459 
1460 		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1461 	}
1462 
1463 	/*
1464 	 * Drop the lock here since it might end up calling
1465 	 * uart_start(), which takes the lock.
1466 	 */
1467 	spin_unlock(&port->lock);
1468 	tty_flip_buffer_push(&port->state->port);
1469 	spin_lock(&port->lock);
1470 }
1471 
1472 static void atmel_release_rx_pdc(struct uart_port *port)
1473 {
1474 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1475 	int i;
1476 
1477 	for (i = 0; i < 2; i++) {
1478 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1479 
1480 		dma_unmap_single(port->dev,
1481 				 pdc->dma_addr,
1482 				 pdc->dma_size,
1483 				 DMA_FROM_DEVICE);
1484 		kfree(pdc->buf);
1485 	}
1486 }
1487 
1488 static void atmel_rx_from_pdc(struct uart_port *port)
1489 {
1490 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1491 	struct tty_port *tport = &port->state->port;
1492 	struct atmel_dma_buffer *pdc;
1493 	int rx_idx = atmel_port->pdc_rx_idx;
1494 	unsigned int head;
1495 	unsigned int tail;
1496 	unsigned int count;
1497 
1498 	do {
1499 		/* Reset the UART timeout early so that we don't miss one */
1500 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1501 
1502 		pdc = &atmel_port->pdc_rx[rx_idx];
1503 		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1504 		tail = pdc->ofs;
1505 
1506 		/* If the PDC has switched buffers, RPR won't contain
1507 		 * any address within the current buffer. Since head
1508 		 * is unsigned, we just need a one-way comparison to
1509 		 * find out.
1510 		 *
1511 		 * In this case, we just need to consume the entire
1512 		 * buffer and resubmit it for DMA. This will clear the
1513 		 * ENDRX bit as well, so that we can safely re-enable
1514 		 * all interrupts below.
1515 		 */
1516 		head = min(head, pdc->dma_size);
1517 
1518 		if (likely(head != tail)) {
1519 			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1520 					pdc->dma_size, DMA_FROM_DEVICE);
1521 
1522 			/*
1523 			 * head will only wrap around when we recycle
1524 			 * the DMA buffer, and when that happens, we
1525 			 * explicitly set tail to 0. So head will
1526 			 * always be greater than tail.
1527 			 */
1528 			count = head - tail;
1529 
1530 			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1531 						count);
1532 
1533 			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1534 					pdc->dma_size, DMA_FROM_DEVICE);
1535 
1536 			port->icount.rx += count;
1537 			pdc->ofs = head;
1538 		}
1539 
1540 		/*
1541 		 * If the current buffer is full, we need to check if
1542 		 * the next one contains any additional data.
1543 		 */
1544 		if (head >= pdc->dma_size) {
1545 			pdc->ofs = 0;
1546 			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1547 			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1548 
1549 			rx_idx = !rx_idx;
1550 			atmel_port->pdc_rx_idx = rx_idx;
1551 		}
1552 	} while (head >= pdc->dma_size);
1553 
1554 	/*
1555 	 * Drop the lock here since it might end up calling
1556 	 * uart_start(), which takes the lock.
1557 	 */
1558 	spin_unlock(&port->lock);
1559 	tty_flip_buffer_push(tport);
1560 	spin_lock(&port->lock);
1561 
1562 	atmel_uart_writel(port, ATMEL_US_IER,
1563 			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1564 }
1565 
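/*
 * Allocate and map the two ping-pong PDC receive buffers and program the
 * initial and next pointer/counter registers.
 */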
1566 static int atmel_prepare_rx_pdc(struct uart_port *port)
1567 {
1568 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1569 	int i;
1570 
1571 	for (i = 0; i < 2; i++) {
1572 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1573 
1574 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1575 		if (pdc->buf == NULL) {
1576 			if (i != 0) {
1577 				dma_unmap_single(port->dev,
1578 					atmel_port->pdc_rx[0].dma_addr,
1579 					PDC_BUFFER_SIZE,
1580 					DMA_FROM_DEVICE);
1581 				kfree(atmel_port->pdc_rx[0].buf);
1582 			}
1583 			atmel_port->use_pdc_rx = 0;
1584 			return -ENOMEM;
1585 		}
1586 		pdc->dma_addr = dma_map_single(port->dev,
1587 						pdc->buf,
1588 						PDC_BUFFER_SIZE,
1589 						DMA_FROM_DEVICE);
1590 		pdc->dma_size = PDC_BUFFER_SIZE;
1591 		pdc->ofs = 0;
1592 	}
1593 
1594 	atmel_port->pdc_rx_idx = 0;
1595 
1596 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1597 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1598 
1599 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1600 			  atmel_port->pdc_rx[1].dma_addr);
1601 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1602 
1603 	return 0;
1604 }
1605 
1606 /*
1607  * tasklet handling tty stuff outside the interrupt handler.
1608  */
1609 static void atmel_tasklet_rx_func(unsigned long data)
1610 {
1611 	struct uart_port *port = (struct uart_port *)data;
1612 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1613 
1614 	/* The interrupt handler does not take the lock */
1615 	spin_lock(&port->lock);
1616 	atmel_port->schedule_rx(port);
1617 	spin_unlock(&port->lock);
1618 }
1619 
1620 static void atmel_tasklet_tx_func(unsigned long data)
1621 {
1622 	struct uart_port *port = (struct uart_port *)data;
1623 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1624 
1625 	/* The interrupt handler does not take the lock */
1626 	spin_lock(&port->lock);
1627 	atmel_port->schedule_tx(port);
1628 	spin_unlock(&port->lock);
1629 }
1630 
1631 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1632 				struct platform_device *pdev)
1633 {
1634 	struct device_node *np = pdev->dev.of_node;
1635 
1636 	/* DMA/PDC usage specification */
1637 	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1638 		if (of_property_read_bool(np, "dmas")) {
1639 			atmel_port->use_dma_rx  = true;
1640 			atmel_port->use_pdc_rx  = false;
1641 		} else {
1642 			atmel_port->use_dma_rx  = false;
1643 			atmel_port->use_pdc_rx  = true;
1644 		}
1645 	} else {
1646 		atmel_port->use_dma_rx  = false;
1647 		atmel_port->use_pdc_rx  = false;
1648 	}
1649 
1650 	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1651 		if (of_property_read_bool(np, "dmas")) {
1652 			atmel_port->use_dma_tx  = true;
1653 			atmel_port->use_pdc_tx  = false;
1654 		} else {
1655 			atmel_port->use_dma_tx  = false;
1656 			atmel_port->use_pdc_tx  = true;
1657 		}
1658 	} else {
1659 		atmel_port->use_dma_tx  = false;
1660 		atmel_port->use_pdc_tx  = false;
1661 	}
1662 }
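/*
 * Illustrative device tree fragment (not taken from a real board) selecting
 * DMA rather than PDC transfers for both directions:
 *
 *	serial@f8020000 {
 *		compatible = "atmel,at91sam9260-usart";
 *		atmel,use-dma-rx;
 *		atmel,use-dma-tx;
 *		dmas = <...>, <...>;
 *		dma-names = "tx", "rx";
 *	};
 */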
1663 
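/*
 * Select the rx/tx backends (DMA, PDC or PIO) according to the flags
 * resolved by atmel_init_property(); the PIO paths need no prepare/release
 * hooks.
 */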
1664 static void atmel_set_ops(struct uart_port *port)
1665 {
1666 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1667 
1668 	if (atmel_use_dma_rx(port)) {
1669 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1670 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1671 		atmel_port->release_rx = &atmel_release_rx_dma;
1672 	} else if (atmel_use_pdc_rx(port)) {
1673 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1674 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1675 		atmel_port->release_rx = &atmel_release_rx_pdc;
1676 	} else {
1677 		atmel_port->prepare_rx = NULL;
1678 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1679 		atmel_port->release_rx = NULL;
1680 	}
1681 
1682 	if (atmel_use_dma_tx(port)) {
1683 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1684 		atmel_port->schedule_tx = &atmel_tx_dma;
1685 		atmel_port->release_tx = &atmel_release_tx_dma;
1686 	} else if (atmel_use_pdc_tx(port)) {
1687 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1688 		atmel_port->schedule_tx = &atmel_tx_pdc;
1689 		atmel_port->release_tx = &atmel_release_tx_pdc;
1690 	} else {
1691 		atmel_port->prepare_tx = NULL;
1692 		atmel_port->schedule_tx = &atmel_tx_chars;
1693 		atmel_port->release_tx = NULL;
1694 	}
1695 }
1696 
1697 /*
1698  * Get the IP name: usart or uart
1699  */
1700 static void atmel_get_ip_name(struct uart_port *port)
1701 {
1702 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1703 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1704 	u32 version;
1705 	u32 usart, dbgu_uart, new_uart;
1706 	/* ASCII decoding for IP version */
1707 	usart = 0x55534152;	/* USAR(T) */
1708 	dbgu_uart = 0x44424755;	/* DBGU */
1709 	new_uart = 0x55415254;	/* UART */
1710 
1711 	/*
1712 	 * Only USART devices from at91sam9260 SOC implement fractional
1713 	 * baudrate. It is available for all asynchronous modes, with the
1714 	 * following restriction: the sampling clock's duty cycle is not
1715 	 * constant.
1716 	 */
1717 	atmel_port->has_frac_baudrate = false;
1718 	atmel_port->has_hw_timer = false;
1719 
1720 	if (name == new_uart) {
1721 		dev_dbg(port->dev, "Uart with hw timer");
1722 		atmel_port->has_hw_timer = true;
1723 		atmel_port->rtor = ATMEL_UA_RTOR;
1724 	} else if (name == usart) {
1725 		dev_dbg(port->dev, "Usart\n");
1726 		atmel_port->has_frac_baudrate = true;
1727 		atmel_port->has_hw_timer = true;
1728 		atmel_port->rtor = ATMEL_US_RTOR;
1729 	} else if (name == dbgu_uart) {
1730 		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1731 	} else {
1732 		/* fallback for older SoCs: use version field */
1733 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1734 		switch (version) {
1735 		case 0x302:
1736 		case 0x10213:
1737 		case 0x10302:
1738 			dev_dbg(port->dev, "This version is usart\n");
1739 			atmel_port->has_frac_baudrate = true;
1740 			atmel_port->has_hw_timer = true;
1741 			atmel_port->rtor = ATMEL_US_RTOR;
1742 			break;
1743 		case 0x203:
1744 		case 0x10202:
1745 			dev_dbg(port->dev, "This version is uart\n");
1746 			break;
1747 		default:
1748 			dev_err(port->dev, "Unsupported ip name or version, defaulting to uart\n");
1749 		}
1750 	}
1751 }
1752 
1753 /*
1754  * Perform initialization and enable port for reception
1755  */
1756 static int atmel_startup(struct uart_port *port)
1757 {
1758 	struct platform_device *pdev = to_platform_device(port->dev);
1759 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1760 	struct tty_struct *tty = port->state->port.tty;
1761 	int retval;
1762 
1763 	/*
1764 	 * Ensure that no interrupts are enabled otherwise when
1765 	 * request_irq() is called we could get stuck trying to
1766 	 * handle an unexpected interrupt
1767 	 */
1768 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1769 	atmel_port->ms_irq_enabled = false;
1770 
1771 	/*
1772 	 * Allocate the IRQ
1773 	 */
1774 	retval = request_irq(port->irq, atmel_interrupt,
1775 			IRQF_SHARED | IRQF_COND_SUSPEND,
1776 			tty ? tty->name : "atmel_serial", port);
1777 	if (retval) {
1778 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1779 		return retval;
1780 	}
1781 
1782 	atomic_set(&atmel_port->tasklet_shutdown, 0);
1783 	tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
1784 			(unsigned long)port);
1785 	tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
1786 			(unsigned long)port);
1787 
1788 	/*
1789 	 * Initialize DMA (if necessary)
1790 	 */
1791 	atmel_init_property(atmel_port, pdev);
1792 	atmel_set_ops(port);
1793 
1794 	if (atmel_port->prepare_rx) {
1795 		retval = atmel_port->prepare_rx(port);
1796 		if (retval < 0)
1797 			atmel_set_ops(port);
1798 	}
1799 
1800 	if (atmel_port->prepare_tx) {
1801 		retval = atmel_port->prepare_tx(port);
1802 		if (retval < 0)
1803 			atmel_set_ops(port);
1804 	}
1805 
1806 	/*
1807 	 * Enable FIFO when available
1808 	 */
1809 	if (atmel_port->fifo_size) {
1810 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1811 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1812 		unsigned int fmr;
1813 
1814 		atmel_uart_writel(port, ATMEL_US_CR,
1815 				  ATMEL_US_FIFOEN |
1816 				  ATMEL_US_RXFCLR |
1817 				  ATMEL_US_TXFLCLR);
1818 
1819 		if (atmel_use_dma_tx(port))
1820 			txrdym = ATMEL_US_FOUR_DATA;
1821 
1822 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1823 		if (atmel_port->rts_high &&
1824 		    atmel_port->rts_low)
1825 			fmr |=	ATMEL_US_FRTSC |
1826 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1827 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1828 
1829 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1830 	}
1831 
1832 	/* Save current CSR for comparison in atmel_tasklet_rx_func() */
1833 	atmel_port->irq_status_prev = atmel_get_lines_status(port);
1834 
1835 	/*
1836 	 * Finally, enable the serial port
1837 	 */
1838 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1839 	/* enable xmit & rcvr */
1840 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1841 	atmel_port->tx_stopped = false;
1842 
1843 	timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1844 
1845 	if (atmel_use_pdc_rx(port)) {
1846 		/* set UART timeout */
1847 		if (!atmel_port->has_hw_timer) {
1848 			mod_timer(&atmel_port->uart_timer,
1849 					jiffies + uart_poll_timeout(port));
1850 		/* set USART timeout */
1851 		} else {
1852 			atmel_uart_writel(port, atmel_port->rtor,
1853 					  PDC_RX_TIMEOUT);
1854 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1855 
1856 			atmel_uart_writel(port, ATMEL_US_IER,
1857 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1858 		}
1859 		/* enable PDC controller */
1860 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1861 	} else if (atmel_use_dma_rx(port)) {
1862 		/* set UART timeout */
1863 		if (!atmel_port->has_hw_timer) {
1864 			mod_timer(&atmel_port->uart_timer,
1865 					jiffies + uart_poll_timeout(port));
1866 		/* set USART timeout */
1867 		} else {
1868 			atmel_uart_writel(port, atmel_port->rtor,
1869 					  PDC_RX_TIMEOUT);
1870 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1871 
1872 			atmel_uart_writel(port, ATMEL_US_IER,
1873 					  ATMEL_US_TIMEOUT);
1874 		}
1875 	} else {
1876 		/* enable receive only */
1877 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 /*
1884  * Flush any TX data submitted for DMA. Called when the TX circular
1885  * buffer is reset.
1886  */
1887 static void atmel_flush_buffer(struct uart_port *port)
1888 {
1889 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1890 
1891 	if (atmel_use_pdc_tx(port)) {
1892 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1893 		atmel_port->pdc_tx.ofs = 0;
1894 	}
1895 	/*
1896 	 * in uart_flush_buffer(), the xmit circular buffer has just
1897 	 * been cleared, so we have to reset tx_len accordingly.
1898 	 */
1899 	atmel_port->tx_len = 0;
1900 }
1901 
1902 /*
1903  * Disable the port
1904  */
1905 static void atmel_shutdown(struct uart_port *port)
1906 {
1907 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1908 
1909 	/* Disable modem control lines interrupts */
1910 	atmel_disable_ms(port);
1911 
1912 	/* Disable interrupts at device level */
1913 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1914 
1915 	/* Prevent spurious interrupts from scheduling the tasklet */
1916 	atomic_inc(&atmel_port->tasklet_shutdown);
1917 
1918 	/*
1919 	 * Prevent any tasklets being scheduled during
1920 	 * cleanup
1921 	 */
1922 	del_timer_sync(&atmel_port->uart_timer);
1923 
1924 	/* Make sure that no interrupt is on the fly */
1925 	synchronize_irq(port->irq);
1926 
1927 	/*
1928 	 * Clear out any scheduled tasklets before
1929 	 * we destroy the buffers
1930 	 */
1931 	tasklet_kill(&atmel_port->tasklet_rx);
1932 	tasklet_kill(&atmel_port->tasklet_tx);
1933 
1934 	/*
1935 	 * Ensure everything is stopped and
1936 	 * disable port and break condition.
1937 	 */
1938 	atmel_stop_rx(port);
1939 	atmel_stop_tx(port);
1940 
1941 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1942 
1943 	/*
1944 	 * Shut-down the DMA.
1945 	 */
1946 	if (atmel_port->release_rx)
1947 		atmel_port->release_rx(port);
1948 	if (atmel_port->release_tx)
1949 		atmel_port->release_tx(port);
1950 
1951 	/*
1952 	 * Reset ring buffer pointers
1953 	 */
1954 	atmel_port->rx_ring.head = 0;
1955 	atmel_port->rx_ring.tail = 0;
1956 
1957 	/*
1958 	 * Free the interrupts
1959 	 */
1960 	free_irq(port->irq, port);
1961 
1962 	atmel_flush_buffer(port);
1963 }
1964 
1965 /*
1966  * Power / Clock management.
1967  */
1968 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
1969 			    unsigned int oldstate)
1970 {
1971 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
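
	/*
	 * The serial core passes UART_PM_STATE_ON (0) when the port is opened
	 * or resumed and UART_PM_STATE_OFF (3) when it is closed or suspended;
	 * no other state is expected here.
	 */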
1972 
1973 	switch (state) {
1974 	case 0:
1975 		/*
1976 		 * Enable the peripheral clock for this serial port.
1977 		 * This is called on uart_open() or a resume event.
1978 		 */
1979 		clk_prepare_enable(atmel_port->clk);
1980 
1981 		/* re-enable interrupts if we disabled some on suspend */
1982 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
1983 		break;
1984 	case 3:
1985 		/* Back up the interrupt mask and disable all interrupts */
1986 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
1987 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
1988 
1989 		/*
1990 		 * Disable the peripheral clock for this serial port.
1991 		 * This is called on uart_close() or a suspend event.
1992 		 */
1993 		clk_disable_unprepare(atmel_port->clk);
1994 		break;
1995 	default:
1996 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
1997 	}
1998 }
1999 
2000 /*
2001  * Change the port parameters
2002  */
2003 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2004 			      struct ktermios *old)
2005 {
2006 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2007 	unsigned long flags;
2008 	unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
2009 
2010 	/* save the current mode register */
2011 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2012 
2013 	/* reset the mode, clock divisor, parity, stop bits and data size */
2014 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2015 		  ATMEL_US_PAR | ATMEL_US_USMODE);
2016 
2017 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2018 
2019 	/* byte size */
2020 	switch (termios->c_cflag & CSIZE) {
2021 	case CS5:
2022 		mode |= ATMEL_US_CHRL_5;
2023 		break;
2024 	case CS6:
2025 		mode |= ATMEL_US_CHRL_6;
2026 		break;
2027 	case CS7:
2028 		mode |= ATMEL_US_CHRL_7;
2029 		break;
2030 	default:
2031 		mode |= ATMEL_US_CHRL_8;
2032 		break;
2033 	}
2034 
2035 	/* stop bits */
2036 	if (termios->c_cflag & CSTOPB)
2037 		mode |= ATMEL_US_NBSTOP_2;
2038 
2039 	/* parity */
2040 	if (termios->c_cflag & PARENB) {
2041 		/* Mark or Space parity */
2042 		if (termios->c_cflag & CMSPAR) {
2043 			if (termios->c_cflag & PARODD)
2044 				mode |= ATMEL_US_PAR_MARK;
2045 			else
2046 				mode |= ATMEL_US_PAR_SPACE;
2047 		} else if (termios->c_cflag & PARODD)
2048 			mode |= ATMEL_US_PAR_ODD;
2049 		else
2050 			mode |= ATMEL_US_PAR_EVEN;
2051 	} else
2052 		mode |= ATMEL_US_PAR_NONE;
2053 
2054 	spin_lock_irqsave(&port->lock, flags);
2055 
2056 	port->read_status_mask = ATMEL_US_OVRE;
2057 	if (termios->c_iflag & INPCK)
2058 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2059 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2060 		port->read_status_mask |= ATMEL_US_RXBRK;
2061 
2062 	if (atmel_use_pdc_rx(port))
2063 		/* need to enable error interrupts */
2064 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2065 
2066 	/*
2067 	 * Characters to ignore
2068 	 */
2069 	port->ignore_status_mask = 0;
2070 	if (termios->c_iflag & IGNPAR)
2071 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2072 	if (termios->c_iflag & IGNBRK) {
2073 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2074 		/*
2075 		 * If we're ignoring parity and break indicators,
2076 		 * ignore overruns too (for real raw support).
2077 		 */
2078 		if (termios->c_iflag & IGNPAR)
2079 			port->ignore_status_mask |= ATMEL_US_OVRE;
2080 	}
2081 	/* TODO: Ignore all characters if CREAD is set.*/
2082 
2083 	/* update the per-port timeout */
2084 	uart_update_timeout(port, termios->c_cflag, baud);
2085 
2086 	/*
2087 	 * save/disable interrupts. The tty layer will ensure that the
2088 	 * transmitter is empty if requested by the caller, so there's
2089 	 * no need to wait for it here.
2090 	 */
2091 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2092 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2093 
2094 	/* disable receiver and transmitter */
2095 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2096 	atmel_port->tx_stopped = true;
2097 
2098 	/* mode */
2099 	if (port->rs485.flags & SER_RS485_ENABLED) {
2100 		atmel_uart_writel(port, ATMEL_US_TTGR,
2101 				  port->rs485.delay_rts_after_send);
2102 		mode |= ATMEL_US_USMODE_RS485;
2103 	} else if (termios->c_cflag & CRTSCTS) {
2104 		/* RS232 with hardware handshake (RTS/CTS) */
2105 		if (atmel_use_fifo(port) &&
2106 		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2107 			/*
2108 			 * with ATMEL_US_USMODE_HWHS set, the controller will
2109 			 * be able to drive the RTS pin high/low when the RX
2110 			 * FIFO is above RXFTHRES/below RXFTHRES2.
2111 			 * It will also disable the transmitter when the CTS
2112 			 * pin is high.
2113 			 * This mode is not activated if CTS pin is a GPIO
2114 			 * because in this case, the transmitter is always
2115 			 * disabled (there must be an internal pull-up
2116 			 * responsible for this behaviour).
2117 			 * If the RTS pin is a GPIO, the controller won't be
2118 			 * able to drive it according to the FIFO thresholds,
2119 			 * but it will be handled by the driver.
2120 			 */
2121 			mode |= ATMEL_US_USMODE_HWHS;
2122 		} else {
2123 			/*
2124 			 * For platforms without FIFO, the flow control is
2125 			 * handled by the driver.
2126 			 */
2127 			mode |= ATMEL_US_USMODE_NORMAL;
2128 		}
2129 	} else {
		/* RS232 without hardware handshake */
2131 		mode |= ATMEL_US_USMODE_NORMAL;
2132 	}
2133 
2155 	/*
2156 	 * Set the baud rate:
	 * The fractional baud rate generator allows the output frequency to
	 * be set more accurately. This feature is enabled only when using
	 * normal mode.
2159 	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2160 	 * Currently, OVER is always set to 0 so we get
2161 	 * baudrate = selected clock / (16 * (CD + FP / 8))
2162 	 * then
2163 	 * 8 CD + FP = selected clock / (2 * baudrate)
2164 	 */
2165 	if (atmel_port->has_frac_baudrate) {
2166 		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2167 		cd = div >> 3;
2168 		fp = div & ATMEL_US_FP_MASK;
2169 	} else {
2170 		cd = uart_get_divisor(port, baud);
2171 	}
2172 
2173 	if (cd > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
2174 		cd /= 8;
2175 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2176 	}
2177 	quot = cd | fp << ATMEL_US_FP_OFFSET;
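	/*
	 * Example (illustrative values): with a 132 MHz peripheral clock and
	 * a requested rate of 115200 baud on a USART with fractional support,
	 * div = DIV_ROUND_CLOSEST(132000000, 230400) = 573, hence CD = 71 and
	 * FP = 5, i.e. an actual rate of 132000000 / (16 * 71.625) ~= 115183
	 * baud (about 0.015% off).
	 */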
2178 
	atmel_uart_writel(port, ATMEL_US_BRGR, quot);

	/*
	 * Set the mode, clock divisor, parity, stop bits and data size.
	 * This must happen after the divisor computation above, which may
	 * have switched the clock selection in 'mode' to MCK/8.
	 */
	atmel_uart_writel(port, ATMEL_US_MR, mode);

	/*
	 * when switching the mode, set the RTS line state according to the
	 * new mode, otherwise keep the former state
	 */
	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
		unsigned int rts_state;

		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
			/* let the hardware control the RTS line */
			rts_state = ATMEL_US_RTSDIS;
		} else {
			/* force RTS line to low level */
			rts_state = ATMEL_US_RTSEN;
		}

		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
	}

2180 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2181 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2182 	atmel_port->tx_stopped = false;
2183 
2184 	/* restore interrupts */
2185 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2186 
2187 	/* CTS flow-control and modem-status interrupts */
2188 	if (UART_ENABLE_MS(port, termios->c_cflag))
2189 		atmel_enable_ms(port);
2190 	else
2191 		atmel_disable_ms(port);
2192 
2193 	spin_unlock_irqrestore(&port->lock, flags);
2194 }
2195 
2196 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2197 {
2198 	if (termios->c_line == N_PPS) {
2199 		port->flags |= UPF_HARDPPS_CD;
2200 		spin_lock_irq(&port->lock);
2201 		atmel_enable_ms(port);
2202 		spin_unlock_irq(&port->lock);
2203 	} else {
2204 		port->flags &= ~UPF_HARDPPS_CD;
2205 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2206 			spin_lock_irq(&port->lock);
2207 			atmel_disable_ms(port);
2208 			spin_unlock_irq(&port->lock);
2209 		}
2210 	}
2211 }
2212 
2213 /*
2214  * Return string describing the specified port
2215  */
2216 static const char *atmel_type(struct uart_port *port)
2217 {
2218 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2219 }
2220 
2221 /*
2222  * Release the memory region(s) being used by 'port'.
2223  */
2224 static void atmel_release_port(struct uart_port *port)
2225 {
2226 	struct platform_device *pdev = to_platform_device(port->dev);
	int size = resource_size(&pdev->resource[0]);
2228 
2229 	release_mem_region(port->mapbase, size);
2230 
2231 	if (port->flags & UPF_IOREMAP) {
2232 		iounmap(port->membase);
2233 		port->membase = NULL;
2234 	}
2235 }
2236 
2237 /*
2238  * Request the memory region(s) being used by 'port'.
2239  */
2240 static int atmel_request_port(struct uart_port *port)
2241 {
2242 	struct platform_device *pdev = to_platform_device(port->dev);
	int size = resource_size(&pdev->resource[0]);
2244 
2245 	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2246 		return -EBUSY;
2247 
2248 	if (port->flags & UPF_IOREMAP) {
2249 		port->membase = ioremap(port->mapbase, size);
2250 		if (port->membase == NULL) {
2251 			release_mem_region(port->mapbase, size);
2252 			return -ENOMEM;
2253 		}
2254 	}
2255 
2256 	return 0;
2257 }
2258 
2259 /*
2260  * Configure/autoconfigure the port.
2261  */
2262 static void atmel_config_port(struct uart_port *port, int flags)
2263 {
2264 	if (flags & UART_CONFIG_TYPE) {
2265 		port->type = PORT_ATMEL;
2266 		atmel_request_port(port);
2267 	}
2268 }
2269 
2270 /*
2271  * Verify the new serial_struct (for TIOCSSERIAL).
2272  */
2273 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2274 {
	int ret = 0;

2276 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2277 		ret = -EINVAL;
2278 	if (port->irq != ser->irq)
2279 		ret = -EINVAL;
2280 	if (ser->io_type != SERIAL_IO_MEM)
2281 		ret = -EINVAL;
2282 	if (port->uartclk / 16 != ser->baud_base)
2283 		ret = -EINVAL;
2284 	if (port->mapbase != (unsigned long)ser->iomem_base)
2285 		ret = -EINVAL;
2286 	if (port->iobase != ser->port)
2287 		ret = -EINVAL;
2288 	if (ser->hub6 != 0)
2289 		ret = -EINVAL;
2290 	return ret;
2291 }
2292 
2293 #ifdef CONFIG_CONSOLE_POLL
2294 static int atmel_poll_get_char(struct uart_port *port)
2295 {
2296 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2297 		cpu_relax();
2298 
2299 	return atmel_uart_read_char(port);
2300 }
2301 
2302 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2303 {
2304 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2305 		cpu_relax();
2306 
2307 	atmel_uart_write_char(port, ch);
2308 }
2309 #endif
2310 
2311 static const struct uart_ops atmel_pops = {
2312 	.tx_empty	= atmel_tx_empty,
2313 	.set_mctrl	= atmel_set_mctrl,
2314 	.get_mctrl	= atmel_get_mctrl,
2315 	.stop_tx	= atmel_stop_tx,
2316 	.start_tx	= atmel_start_tx,
2317 	.stop_rx	= atmel_stop_rx,
2318 	.enable_ms	= atmel_enable_ms,
2319 	.break_ctl	= atmel_break_ctl,
2320 	.startup	= atmel_startup,
2321 	.shutdown	= atmel_shutdown,
2322 	.flush_buffer	= atmel_flush_buffer,
2323 	.set_termios	= atmel_set_termios,
2324 	.set_ldisc	= atmel_set_ldisc,
2325 	.type		= atmel_type,
2326 	.release_port	= atmel_release_port,
2327 	.request_port	= atmel_request_port,
2328 	.config_port	= atmel_config_port,
2329 	.verify_port	= atmel_verify_port,
2330 	.pm		= atmel_serial_pm,
2331 #ifdef CONFIG_CONSOLE_POLL
2332 	.poll_get_char	= atmel_poll_get_char,
2333 	.poll_put_char	= atmel_poll_put_char,
2334 #endif
2335 };
2336 
2337 /*
2338  * Configure the port from the platform device resource info.
2339  */
2340 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2341 				      struct platform_device *pdev)
2342 {
2343 	int ret;
2344 	struct uart_port *port = &atmel_port->uart;
2345 
2346 	atmel_init_property(atmel_port, pdev);
2347 	atmel_set_ops(port);
2348 
2349 	uart_get_rs485_mode(&pdev->dev, &port->rs485);
2350 
2351 	port->iotype		= UPIO_MEM;
2352 	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2353 	port->ops		= &atmel_pops;
2354 	port->fifosize		= 1;
2355 	port->dev		= &pdev->dev;
2356 	port->mapbase	= pdev->resource[0].start;
2357 	port->irq	= pdev->resource[1].start;
2358 	port->rs485_config	= atmel_config_rs485;
2359 	port->membase	= NULL;
2360 
2361 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2362 
2363 	/* for console, the clock could already be configured */
2364 	if (!atmel_port->clk) {
2365 		atmel_port->clk = clk_get(&pdev->dev, "usart");
2366 		if (IS_ERR(atmel_port->clk)) {
2367 			ret = PTR_ERR(atmel_port->clk);
2368 			atmel_port->clk = NULL;
2369 			return ret;
2370 		}
2371 		ret = clk_prepare_enable(atmel_port->clk);
2372 		if (ret) {
2373 			clk_put(atmel_port->clk);
2374 			atmel_port->clk = NULL;
2375 			return ret;
2376 		}
2377 		port->uartclk = clk_get_rate(atmel_port->clk);
2378 		clk_disable_unprepare(atmel_port->clk);
2379 		/* only enable clock when USART is in use */
2380 	}
2381 
2382 	/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
2383 	if (port->rs485.flags & SER_RS485_ENABLED)
2384 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2385 	else if (atmel_use_pdc_tx(port)) {
2386 		port->fifosize = PDC_BUFFER_SIZE;
2387 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2388 	} else {
2389 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2390 	}
2391 
2392 	return 0;
2393 }
2394 
2395 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2396 static void atmel_console_putchar(struct uart_port *port, int ch)
2397 {
2398 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2399 		cpu_relax();
2400 	atmel_uart_write_char(port, ch);
2401 }
2402 
2403 /*
2404  * Interrupts are disabled on entering
2405  */
2406 static void atmel_console_write(struct console *co, const char *s, u_int count)
2407 {
2408 	struct uart_port *port = &atmel_ports[co->index].uart;
2409 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2410 	unsigned int status, imr;
2411 	unsigned int pdc_tx;
2412 
2413 	/*
2414 	 * First, save IMR and then disable interrupts
2415 	 */
2416 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2417 	atmel_uart_writel(port, ATMEL_US_IDR,
2418 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2419 
2420 	/* Store PDC transmit status and disable it */
2421 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2422 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2423 
2424 	/* Make sure that tx path is actually able to send characters */
2425 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2426 	atmel_port->tx_stopped = false;
2427 
2428 	uart_console_write(port, s, count, atmel_console_putchar);
2429 
2430 	/*
	 * Finally, wait for the transmitter to become ready again
	 * and restore IMR
2433 	 */
2434 	do {
2435 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2436 	} while (!(status & ATMEL_US_TXRDY));
2437 
2438 	/* Restore PDC transmit status */
2439 	if (pdc_tx)
2440 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2441 
2442 	/* set interrupts back the way they were */
2443 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2444 }
2445 
2446 /*
 * If the port was already initialised (e.g., by a boot loader),
2448  * try to determine the current setup.
2449  */
2450 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2451 					     int *parity, int *bits)
2452 {
2453 	unsigned int mr, quot;
2454 
2455 	/*
2456 	 * If the baud rate generator isn't running, the port wasn't
2457 	 * initialized by the boot loader.
2458 	 */
2459 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2460 	if (!quot)
2461 		return;
2462 
2463 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2464 	if (mr == ATMEL_US_CHRL_8)
2465 		*bits = 8;
2466 	else
2467 		*bits = 7;
2468 
2469 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2470 	if (mr == ATMEL_US_PAR_EVEN)
2471 		*parity = 'e';
2472 	else if (mr == ATMEL_US_PAR_ODD)
2473 		*parity = 'o';
2474 
2475 	/*
2476 	 * The serial core only rounds down when matching this to a
2477 	 * supported baud rate. Make sure we don't end up slightly
2478 	 * lower than one of those, as it would make us fall through
2479 	 * to a much lower baud rate than we really want.
2480 	 */
2481 	*baud = port->uartclk / (16 * (quot - 1));
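	/*
	 * For instance (illustrative values), with uartclk = 66 MHz and
	 * CD = 36 the real rate is 66000000 / (16 * 36) ~= 114583 baud;
	 * computing with (CD - 1) reports ~117857 instead, which the core
	 * then safely rounds down to 115200.
	 */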
2482 }
2483 
2484 static int __init atmel_console_setup(struct console *co, char *options)
2485 {
2486 	int ret;
2487 	struct uart_port *port = &atmel_ports[co->index].uart;
2488 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2489 	int baud = 115200;
2490 	int bits = 8;
2491 	int parity = 'n';
2492 	int flow = 'n';
2493 
2494 	if (port->membase == NULL) {
2495 		/* Port not initialized yet - delay setup */
2496 		return -ENODEV;
2497 	}
2498 
2499 	ret = clk_prepare_enable(atmel_ports[co->index].clk);
2500 	if (ret)
2501 		return ret;
2502 
2503 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2504 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2505 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2506 	atmel_port->tx_stopped = false;
2507 
2508 	if (options)
2509 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2510 	else
2511 		atmel_console_get_options(port, &baud, &parity, &bits);
2512 
2513 	return uart_set_options(port, co, baud, parity, bits, flow);
2514 }
2515 
2516 static struct uart_driver atmel_uart;
2517 
2518 static struct console atmel_console = {
2519 	.name		= ATMEL_DEVICENAME,
2520 	.write		= atmel_console_write,
2521 	.device		= uart_console_device,
2522 	.setup		= atmel_console_setup,
2523 	.flags		= CON_PRINTBUFFER,
2524 	.index		= -1,
2525 	.data		= &atmel_uart,
2526 };
2527 
2528 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2529 
2530 static inline bool atmel_is_console_port(struct uart_port *port)
2531 {
2532 	return port->cons && port->cons->index == port->line;
2533 }
2534 
2535 #else
2536 #define ATMEL_CONSOLE_DEVICE	NULL
2537 
2538 static inline bool atmel_is_console_port(struct uart_port *port)
2539 {
2540 	return false;
2541 }
2542 #endif
2543 
2544 static struct uart_driver atmel_uart = {
2545 	.owner		= THIS_MODULE,
2546 	.driver_name	= "atmel_serial",
2547 	.dev_name	= ATMEL_DEVICENAME,
2548 	.major		= SERIAL_ATMEL_MAJOR,
2549 	.minor		= MINOR_START,
2550 	.nr		= ATMEL_MAX_UART,
2551 	.cons		= ATMEL_CONSOLE_DEVICE,
2552 };
2553 
2554 #ifdef CONFIG_PM
2555 static bool atmel_serial_clk_will_stop(void)
2556 {
2557 #ifdef CONFIG_ARCH_AT91
2558 	return at91_suspend_entering_slow_clock();
2559 #else
2560 	return false;
2561 #endif
2562 }
2563 
2564 static int atmel_serial_suspend(struct platform_device *pdev,
2565 				pm_message_t state)
2566 {
2567 	struct uart_port *port = platform_get_drvdata(pdev);
2568 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2569 
2570 	if (atmel_is_console_port(port) && console_suspend_enabled) {
2571 		/* Drain the TX shifter */
2572 		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2573 			 ATMEL_US_TXEMPTY))
2574 			cpu_relax();
2575 	}
2576 
2577 	if (atmel_is_console_port(port) && !console_suspend_enabled) {
		/*
		 * Cache register values as we won't get a full
		 * shutdown/startup cycle
		 */
2581 		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2582 		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2583 		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2584 		atmel_port->cache.rtor = atmel_uart_readl(port,
2585 							  atmel_port->rtor);
2586 		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2587 		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2588 		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2589 	}
2590 
	/* we cannot wake up if we're running on the slow clock */
2592 	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2593 	if (atmel_serial_clk_will_stop()) {
2594 		unsigned long flags;
2595 
2596 		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2597 		atmel_port->suspended = true;
2598 		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2599 		device_set_wakeup_enable(&pdev->dev, 0);
2600 	}
2601 
2602 	uart_suspend_port(&atmel_uart, port);
2603 
2604 	return 0;
2605 }
2606 
2607 static int atmel_serial_resume(struct platform_device *pdev)
2608 {
2609 	struct uart_port *port = platform_get_drvdata(pdev);
2610 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2611 	unsigned long flags;
2612 
2613 	if (atmel_is_console_port(port) && !console_suspend_enabled) {
2614 		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2615 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2616 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2617 		atmel_uart_writel(port, atmel_port->rtor,
2618 				  atmel_port->cache.rtor);
2619 		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2620 
2621 		if (atmel_port->fifo_size) {
2622 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2623 					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2624 			atmel_uart_writel(port, ATMEL_US_FMR,
2625 					  atmel_port->cache.fmr);
2626 			atmel_uart_writel(port, ATMEL_US_FIER,
2627 					  atmel_port->cache.fimr);
2628 		}
2629 		atmel_start_rx(port);
2630 	}
2631 
2632 	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2633 	if (atmel_port->pending) {
2634 		atmel_handle_receive(port, atmel_port->pending);
2635 		atmel_handle_status(port, atmel_port->pending,
2636 				    atmel_port->pending_status);
2637 		atmel_handle_transmit(port, atmel_port->pending);
2638 		atmel_port->pending = 0;
2639 	}
2640 	atmel_port->suspended = false;
2641 	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2642 
2643 	uart_resume_port(&atmel_uart, port);
2644 	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2645 
2646 	return 0;
2647 }
2648 #else
2649 #define atmel_serial_suspend NULL
2650 #define atmel_serial_resume NULL
2651 #endif
2652 
2653 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2654 				     struct platform_device *pdev)
2655 {
2656 	atmel_port->fifo_size = 0;
2657 	atmel_port->rts_low = 0;
2658 	atmel_port->rts_high = 0;
2659 
2660 	if (of_property_read_u32(pdev->dev.of_node,
2661 				 "atmel,fifo-size",
2662 				 &atmel_port->fifo_size))
2663 		return;
2664 
2665 	if (!atmel_port->fifo_size)
2666 		return;
2667 
2668 	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2669 		atmel_port->fifo_size = 0;
2670 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2671 		return;
2672 	}
2673 
2674 	/*
2675 	 * 0 <= rts_low <= rts_high <= fifo_size
	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
	 * they actually stop sending new data. So we try to set the RTS High
2679 	 * Threshold to a reasonably high value respecting this 16 data
2680 	 * empirical rule when possible.
2681 	 */
2682 	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2683 			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2684 	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2685 			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
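	/*
	 * For example (illustrative values): a 32-data FIFO yields
	 * rts_high = 16 and rts_low = 12 with the offsets above.
	 */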
2686 
2687 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2688 		 atmel_port->fifo_size);
2689 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2690 		atmel_port->rts_high);
2691 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2692 		atmel_port->rts_low);
2693 }
2694 
2695 static int atmel_serial_probe(struct platform_device *pdev)
2696 {
2697 	struct atmel_uart_port *atmel_port;
2698 	struct device_node *np = pdev->dev.of_node;
2699 	void *data;
2700 	int ret = -ENODEV;
2701 	bool rs485_enabled;
2702 
2703 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
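	/*
	 * (The RX ring indices are wrapped by masking with
	 * ATMEL_SERIAL_RINGSIZE - 1, hence the power-of-two check above.)
	 */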
2704 
2705 	ret = of_alias_get_id(np, "serial");
2706 	if (ret < 0)
		/* port id not found in platform data or device-tree aliases:
		 * auto-enumerate it */
2709 		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2710 
2711 	if (ret >= ATMEL_MAX_UART) {
2712 		ret = -ENODEV;
2713 		goto err;
2714 	}
2715 
2716 	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2717 		/* port already in use */
2718 		ret = -EBUSY;
2719 		goto err;
2720 	}
2721 
2722 	atmel_port = &atmel_ports[ret];
2723 	atmel_port->backup_imr = 0;
2724 	atmel_port->uart.line = ret;
2725 	atmel_serial_probe_fifos(atmel_port, pdev);
2726 
2727 	atomic_set(&atmel_port->tasklet_shutdown, 0);
2728 	spin_lock_init(&atmel_port->lock_suspended);
2729 
2730 	ret = atmel_init_port(atmel_port, pdev);
2731 	if (ret)
2732 		goto err_clear_bit;
2733 
2734 	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2735 	if (IS_ERR(atmel_port->gpios)) {
2736 		ret = PTR_ERR(atmel_port->gpios);
2737 		goto err_clear_bit;
2738 	}
2739 
2740 	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2741 		ret = -ENOMEM;
2742 		data = kmalloc(sizeof(struct atmel_uart_char)
2743 				* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
2744 		if (!data)
2745 			goto err_alloc_ring;
2746 		atmel_port->rx_ring.buf = data;
2747 	}
2748 
2749 	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2750 
2751 	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2752 	if (ret)
2753 		goto err_add_port;
2754 
2755 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2756 	if (atmel_is_console_port(&atmel_port->uart)
2757 			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2758 		/*
2759 		 * The serial core enabled the clock for us, so undo
2760 		 * the clk_prepare_enable() in atmel_console_setup()
2761 		 */
2762 		clk_disable_unprepare(atmel_port->clk);
2763 	}
2764 #endif
2765 
2766 	device_init_wakeup(&pdev->dev, 1);
2767 	platform_set_drvdata(pdev, atmel_port);
2768 
2769 	/*
2770 	 * The peripheral clock has been disabled by atmel_init_port():
2771 	 * enable it before accessing I/O registers
2772 	 */
2773 	clk_prepare_enable(atmel_port->clk);
2774 
2775 	if (rs485_enabled) {
2776 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2777 				  ATMEL_US_USMODE_NORMAL);
2778 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2779 				  ATMEL_US_RTSEN);
2780 	}
2781 
2782 	/*
2783 	 * Get port name of usart or uart
2784 	 */
2785 	atmel_get_ip_name(&atmel_port->uart);
2786 
2787 	/*
2788 	 * The peripheral clock can now safely be disabled till the port
2789 	 * is used
2790 	 */
2791 	clk_disable_unprepare(atmel_port->clk);
2792 
2793 	return 0;
2794 
2795 err_add_port:
2796 	kfree(atmel_port->rx_ring.buf);
2797 	atmel_port->rx_ring.buf = NULL;
2798 err_alloc_ring:
2799 	if (!atmel_is_console_port(&atmel_port->uart)) {
2800 		clk_put(atmel_port->clk);
2801 		atmel_port->clk = NULL;
2802 	}
2803 err_clear_bit:
2804 	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2805 err:
2806 	return ret;
2807 }
2808 
2809 /*
2810  * Even if the driver is not modular, it makes sense to be able to
2811  * unbind a device: there can be many bound devices, and there are
2812  * situations where dynamic binding and unbinding can be useful.
2813  *
2814  * For example, a connected device can require a specific firmware update
2815  * protocol that needs bitbanging on IO lines, but use the regular serial
2816  * port in the normal case.
2817  */
2818 static int atmel_serial_remove(struct platform_device *pdev)
2819 {
2820 	struct uart_port *port = platform_get_drvdata(pdev);
2821 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2822 	int ret = 0;
2823 
2824 	tasklet_kill(&atmel_port->tasklet_rx);
2825 	tasklet_kill(&atmel_port->tasklet_tx);
2826 
2827 	device_init_wakeup(&pdev->dev, 0);
2828 
2829 	ret = uart_remove_one_port(&atmel_uart, port);
2830 
2831 	kfree(atmel_port->rx_ring.buf);
2832 
2833 	/* "port" is allocated statically, so we shouldn't free it */
2834 
2835 	clear_bit(port->line, atmel_ports_in_use);
2836 
2837 	clk_put(atmel_port->clk);
2838 	atmel_port->clk = NULL;
2839 
2840 	return ret;
2841 }
2842 
2843 static struct platform_driver atmel_serial_driver = {
2844 	.probe		= atmel_serial_probe,
2845 	.remove		= atmel_serial_remove,
2846 	.suspend	= atmel_serial_suspend,
2847 	.resume		= atmel_serial_resume,
2848 	.driver		= {
2849 		.name			= "atmel_usart",
2850 		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
2851 	},
2852 };
2853 
2854 static int __init atmel_serial_init(void)
2855 {
2856 	int ret;
2857 
2858 	ret = uart_register_driver(&atmel_uart);
2859 	if (ret)
2860 		return ret;
2861 
2862 	ret = platform_driver_register(&atmel_serial_driver);
2863 	if (ret)
2864 		uart_unregister_driver(&atmel_uart);
2865 
2866 	return ret;
2867 }
2868 device_initcall(atmel_serial_init);
2869