1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Driver for Atmel AT91 Serial ports
4  *  Copyright (C) 2003 Rick Bronson
5  *
6  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8  *
9  *  DMA support added by Chip Coldwell.
10  */
11 #include <linux/tty.h>
12 #include <linux/ioport.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/serial.h>
16 #include <linux/clk.h>
17 #include <linux/console.h>
18 #include <linux/sysrq.h>
19 #include <linux/tty_flip.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/of_device.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/atmel_pdc.h>
27 #include <linux/uaccess.h>
28 #include <linux/platform_data/atmel.h>
29 #include <linux/timer.h>
30 #include <linux/gpio.h>
31 #include <linux/gpio/consumer.h>
32 #include <linux/err.h>
33 #include <linux/irq.h>
34 #include <linux/suspend.h>
35 #include <linux/mm.h>
36 
37 #include <asm/io.h>
38 #include <asm/ioctls.h>
39 
40 #define PDC_BUFFER_SIZE		512
41 /* Revisit: We should calculate this based on the actual port settings */
42 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
43 
/* The minimum number of data the FIFOs should be able to contain */
45 #define ATMEL_MIN_FIFO_SIZE	8
46 /*
47  * These two offsets are substracted from the RX FIFO size to define the RTS
48  * high and low thresholds
49  */
50 #define ATMEL_RTS_HIGH_OFFSET	16
51 #define ATMEL_RTS_LOW_OFFSET	20
52 
53 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
54 #define SUPPORT_SYSRQ
55 #endif
56 
57 #include <linux/serial_core.h>
58 
59 #include "serial_mctrl_gpio.h"
60 #include "atmel_serial.h"
61 
62 static void atmel_start_rx(struct uart_port *port);
63 static void atmel_stop_rx(struct uart_port *port);
64 
65 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
66 
67 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
68  * should coexist with the 8250 driver, such as if we have an external 16C550
69  * UART. */
70 #define SERIAL_ATMEL_MAJOR	204
71 #define MINOR_START		154
72 #define ATMEL_DEVICENAME	"ttyAT"
73 
74 #else
75 
76 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
77  * name, but it is legally reserved for the 8250 driver. */
78 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
79 #define MINOR_START		64
80 #define ATMEL_DEVICENAME	"ttyS"
81 
82 #endif
83 
84 #define ATMEL_ISR_PASS_LIMIT	256
85 
/*
 * Bookkeeping for one PDC (Peripheral DMA Controller) buffer: the CPU
 * virtual address, the bus address programmed into the controller, the
 * mapped size and the current consumption offset.
 */
struct atmel_dma_buffer {
	unsigned char	*buf;		/* CPU virtual address */
	dma_addr_t	dma_addr;	/* bus address for the PDC */
	unsigned int	dma_size;	/* size of the mapping, in bytes */
	unsigned int	ofs;		/* offset of the next byte to process */
};
92 
/* One received character plus the channel status captured with it. */
struct atmel_uart_char {
	u16		status;		/* CSR error/status bits for this char */
	u16		ch;		/* the received character itself */
};
97 
98 /*
99  * Be careful, the real size of the ring buffer is
100  * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
101  * can contain up to 1024 characters in PIO mode and up to 4096 characters in
102  * DMA mode.
103  */
104 #define ATMEL_SERIAL_RINGSIZE 1024
105 
106 /*
107  * at91: 6 USARTs and one DBGU port (SAM9260)
108  * samx7: 3 USARTs and 5 UARTs
109  */
110 #define ATMEL_MAX_UART		8
111 
112 /*
113  * We wrap our port structure around the generic uart_port.
114  */
115 struct atmel_uart_port {
116 	struct uart_port	uart;		/* uart */
117 	struct clk		*clk;		/* uart clock */
118 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
119 	u32			backup_imr;	/* IMR saved during suspend */
120 	int			break_active;	/* break being received */
121 
122 	bool			use_dma_rx;	/* enable DMA receiver */
123 	bool			use_pdc_rx;	/* enable PDC receiver */
124 	short			pdc_rx_idx;	/* current PDC RX buffer */
125 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receier */
126 
127 	bool			use_dma_tx;     /* enable DMA transmitter */
128 	bool			use_pdc_tx;	/* enable PDC transmitter */
129 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
130 
131 	spinlock_t			lock_tx;	/* port lock */
132 	spinlock_t			lock_rx;	/* port lock */
133 	struct dma_chan			*chan_tx;
134 	struct dma_chan			*chan_rx;
135 	struct dma_async_tx_descriptor	*desc_tx;
136 	struct dma_async_tx_descriptor	*desc_rx;
137 	dma_cookie_t			cookie_tx;
138 	dma_cookie_t			cookie_rx;
139 	struct scatterlist		sg_tx;
140 	struct scatterlist		sg_rx;
141 	struct tasklet_struct	tasklet_rx;
142 	struct tasklet_struct	tasklet_tx;
143 	atomic_t		tasklet_shutdown;
144 	unsigned int		irq_status_prev;
145 	unsigned int		tx_len;
146 
147 	struct circ_buf		rx_ring;
148 
149 	struct mctrl_gpios	*gpios;
150 	unsigned int		tx_done_mask;
151 	u32			fifo_size;
152 	u32			rts_high;
153 	u32			rts_low;
154 	bool			ms_irq_enabled;
155 	u32			rtor;	/* address of receiver timeout register if it exists */
156 	bool			has_frac_baudrate;
157 	bool			has_hw_timer;
158 	struct timer_list	uart_timer;
159 
160 	bool			tx_stopped;
161 	bool			suspended;
162 	unsigned int		pending;
163 	unsigned int		pending_status;
164 	spinlock_t		lock_suspended;
165 
166 #ifdef CONFIG_PM
167 	struct {
168 		u32		cr;
169 		u32		mr;
170 		u32		imr;
171 		u32		brgr;
172 		u32		rtor;
173 		u32		ttgr;
174 		u32		fmr;
175 		u32		fimr;
176 	} cache;
177 #endif
178 
179 	int (*prepare_rx)(struct uart_port *port);
180 	int (*prepare_tx)(struct uart_port *port);
181 	void (*schedule_rx)(struct uart_port *port);
182 	void (*schedule_tx)(struct uart_port *port);
183 	void (*release_rx)(struct uart_port *port);
184 	void (*release_tx)(struct uart_port *port);
185 };
186 
187 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
188 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
189 
190 #ifdef SUPPORT_SYSRQ
191 static struct console atmel_console;
192 #endif
193 
#if defined(CONFIG_OF)
/* Device-tree match table: both RM9200 and SAM9260 USART flavours. */
static const struct of_device_id atmel_serial_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-usart" },
	{ .compatible = "atmel,at91sam9260-usart" },
	{ /* sentinel */ }
};
#endif
201 
/*
 * Convert a generic uart_port pointer back to the wrapping
 * atmel_uart_port (the uart_port is embedded in the larger struct).
 */
static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}
207 
/* 32-bit access to a USART register at offset @reg. */
static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}

static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}

/* Single-byte access to the receive/transmit holding registers. */
static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}
227 
#ifdef CONFIG_SERIAL_ATMEL_PDC
/* Whether this port receives through the Peripheral DMA Controller. */
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_rx;
}

/* Whether this port transmits through the Peripheral DMA Controller. */
static bool atmel_use_pdc_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_tx;
}
#else
/* PDC support compiled out: never use it. */
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	return false;
}

static bool atmel_use_pdc_tx(struct uart_port *port)
{
	return false;
}
#endif
253 
254 static bool atmel_use_dma_tx(struct uart_port *port)
255 {
256 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
257 
258 	return atmel_port->use_dma_tx;
259 }
260 
261 static bool atmel_use_dma_rx(struct uart_port *port)
262 {
263 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
264 
265 	return atmel_port->use_dma_rx;
266 }
267 
268 static bool atmel_use_fifo(struct uart_port *port)
269 {
270 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
271 
272 	return atmel_port->fifo_size;
273 }
274 
275 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
276 				   struct tasklet_struct *t)
277 {
278 	if (!atomic_read(&atmel_port->tasklet_shutdown))
279 		tasklet_schedule(t);
280 }
281 
282 static unsigned int atmel_get_lines_status(struct uart_port *port)
283 {
284 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
285 	unsigned int status, ret = 0;
286 
287 	status = atmel_uart_readl(port, ATMEL_US_CSR);
288 
289 	mctrl_gpio_get(atmel_port->gpios, &ret);
290 
291 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
292 						UART_GPIO_CTS))) {
293 		if (ret & TIOCM_CTS)
294 			status &= ~ATMEL_US_CTS;
295 		else
296 			status |= ATMEL_US_CTS;
297 	}
298 
299 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
300 						UART_GPIO_DSR))) {
301 		if (ret & TIOCM_DSR)
302 			status &= ~ATMEL_US_DSR;
303 		else
304 			status |= ATMEL_US_DSR;
305 	}
306 
307 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
308 						UART_GPIO_RI))) {
309 		if (ret & TIOCM_RI)
310 			status &= ~ATMEL_US_RI;
311 		else
312 			status |= ATMEL_US_RI;
313 	}
314 
315 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
316 						UART_GPIO_DCD))) {
317 		if (ret & TIOCM_CD)
318 			status &= ~ATMEL_US_DCD;
319 		else
320 			status |= ATMEL_US_DCD;
321 	}
322 
323 	return status;
324 }
325 
/* Enable or disable the rs485 support */
static int atmel_config_rs485(struct uart_port *port,
			      struct serial_rs485 *rs485conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;

	/* Disable TX-done interrupts while reconfiguring the mode */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	mode = atmel_uart_readl(port, ATMEL_US_MR);

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	port->rs485 = *rs485conf;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		/* In RS485 mode "TX done" means the shifter is empty too */
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
		atmel_uart_writel(port, ATMEL_US_TTGR,
				  rs485conf->delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
		if (atmel_use_pdc_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
				ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}
	atmel_uart_writel(port, ATMEL_US_MR, mode);

	/* Re-enable interrupts using the (possibly changed) tx_done_mask */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	return 0;
}
364 
365 /*
366  * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
367  */
368 static u_int atmel_tx_empty(struct uart_port *port)
369 {
370 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
371 
372 	if (atmel_port->tx_stopped)
373 		return TIOCSER_TEMT;
374 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
375 		TIOCSER_TEMT :
376 		0;
377 }
378 
379 /*
380  * Set state of the modem control output lines
381  */
382 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
383 {
384 	unsigned int control = 0;
385 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
386 	unsigned int rts_paused, rts_ready;
387 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
388 
389 	/* override mode to RS485 if needed, otherwise keep the current mode */
390 	if (port->rs485.flags & SER_RS485_ENABLED) {
391 		atmel_uart_writel(port, ATMEL_US_TTGR,
392 				  port->rs485.delay_rts_after_send);
393 		mode &= ~ATMEL_US_USMODE;
394 		mode |= ATMEL_US_USMODE_RS485;
395 	}
396 
397 	/* set the RTS line state according to the mode */
398 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
399 		/* force RTS line to high level */
400 		rts_paused = ATMEL_US_RTSEN;
401 
402 		/* give the control of the RTS line back to the hardware */
403 		rts_ready = ATMEL_US_RTSDIS;
404 	} else {
405 		/* force RTS line to high level */
406 		rts_paused = ATMEL_US_RTSDIS;
407 
408 		/* force RTS line to low level */
409 		rts_ready = ATMEL_US_RTSEN;
410 	}
411 
412 	if (mctrl & TIOCM_RTS)
413 		control |= rts_ready;
414 	else
415 		control |= rts_paused;
416 
417 	if (mctrl & TIOCM_DTR)
418 		control |= ATMEL_US_DTREN;
419 	else
420 		control |= ATMEL_US_DTRDIS;
421 
422 	atmel_uart_writel(port, ATMEL_US_CR, control);
423 
424 	mctrl_gpio_set(atmel_port->gpios, mctrl);
425 
426 	/* Local loopback mode? */
427 	mode &= ~ATMEL_US_CHMODE;
428 	if (mctrl & TIOCM_LOOP)
429 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
430 	else
431 		mode |= ATMEL_US_CHMODE_NORMAL;
432 
433 	atmel_uart_writel(port, ATMEL_US_MR, mode);
434 }
435 
436 /*
437  * Get state of the modem control input lines
438  */
439 static u_int atmel_get_mctrl(struct uart_port *port)
440 {
441 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
442 	unsigned int ret = 0, status;
443 
444 	status = atmel_uart_readl(port, ATMEL_US_CSR);
445 
446 	/*
447 	 * The control signals are active low.
448 	 */
449 	if (!(status & ATMEL_US_DCD))
450 		ret |= TIOCM_CD;
451 	if (!(status & ATMEL_US_CTS))
452 		ret |= TIOCM_CTS;
453 	if (!(status & ATMEL_US_DSR))
454 		ret |= TIOCM_DSR;
455 	if (!(status & ATMEL_US_RI))
456 		ret |= TIOCM_RI;
457 
458 	return mctrl_gpio_get(atmel_port->gpios, &ret);
459 }
460 
461 /*
462  * Stop transmitting.
463  */
464 static void atmel_stop_tx(struct uart_port *port)
465 {
466 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
467 
468 	if (atmel_use_pdc_tx(port)) {
469 		/* disable PDC transmit */
470 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
471 	}
472 
473 	/*
474 	 * Disable the transmitter.
475 	 * This is mandatory when DMA is used, otherwise the DMA buffer
476 	 * is fully transmitted.
477 	 */
478 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
479 	atmel_port->tx_stopped = true;
480 
481 	/* Disable interrupts */
482 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
483 
484 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
485 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
486 		atmel_start_rx(port);
487 }
488 
489 /*
490  * Start transmitting.
491  */
492 static void atmel_start_tx(struct uart_port *port)
493 {
494 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
495 
496 	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
497 				       & ATMEL_PDC_TXTEN))
498 		/* The transmitter is already running.  Yes, we
499 		   really need this.*/
500 		return;
501 
502 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
503 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
504 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
505 			atmel_stop_rx(port);
506 
507 	if (atmel_use_pdc_tx(port))
508 		/* re-enable PDC transmit */
509 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
510 
511 	/* Enable interrupts */
512 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
513 
514 	/* re-enable the transmitter */
515 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
516 	atmel_port->tx_stopped = false;
517 }
518 
519 /*
520  * start receiving - port is in process of being opened.
521  */
522 static void atmel_start_rx(struct uart_port *port)
523 {
524 	/* reset status and receiver */
525 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
526 
527 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
528 
529 	if (atmel_use_pdc_rx(port)) {
530 		/* enable PDC controller */
531 		atmel_uart_writel(port, ATMEL_US_IER,
532 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
533 				  port->read_status_mask);
534 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
535 	} else {
536 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
537 	}
538 }
539 
540 /*
541  * Stop receiving - port is in process of being closed.
542  */
543 static void atmel_stop_rx(struct uart_port *port)
544 {
545 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
546 
547 	if (atmel_use_pdc_rx(port)) {
548 		/* disable PDC receive */
549 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
550 		atmel_uart_writel(port, ATMEL_US_IDR,
551 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
552 				  port->read_status_mask);
553 	} else {
554 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
555 	}
556 }
557 
558 /*
559  * Enable modem status interrupts
560  */
561 static void atmel_enable_ms(struct uart_port *port)
562 {
563 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
564 	uint32_t ier = 0;
565 
566 	/*
567 	 * Interrupt should not be enabled twice
568 	 */
569 	if (atmel_port->ms_irq_enabled)
570 		return;
571 
572 	atmel_port->ms_irq_enabled = true;
573 
574 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
575 		ier |= ATMEL_US_CTSIC;
576 
577 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
578 		ier |= ATMEL_US_DSRIC;
579 
580 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
581 		ier |= ATMEL_US_RIIC;
582 
583 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
584 		ier |= ATMEL_US_DCDIC;
585 
586 	atmel_uart_writel(port, ATMEL_US_IER, ier);
587 
588 	mctrl_gpio_enable_ms(atmel_port->gpios);
589 }
590 
591 /*
592  * Disable modem status interrupts
593  */
594 static void atmel_disable_ms(struct uart_port *port)
595 {
596 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
597 	uint32_t idr = 0;
598 
599 	/*
600 	 * Interrupt should not be disabled twice
601 	 */
602 	if (!atmel_port->ms_irq_enabled)
603 		return;
604 
605 	atmel_port->ms_irq_enabled = false;
606 
607 	mctrl_gpio_disable_ms(atmel_port->gpios);
608 
609 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
610 		idr |= ATMEL_US_CTSIC;
611 
612 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
613 		idr |= ATMEL_US_DSRIC;
614 
615 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
616 		idr |= ATMEL_US_RIIC;
617 
618 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
619 		idr |= ATMEL_US_DCDIC;
620 
621 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
622 }
623 
624 /*
625  * Control the transmission of a break signal
626  */
627 static void atmel_break_ctl(struct uart_port *port, int break_state)
628 {
629 	if (break_state != 0)
630 		/* start break */
631 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
632 	else
633 		/* stop break */
634 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
635 }
636 
637 /*
638  * Stores the incoming character in the ring buffer
639  */
640 static void
641 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
642 		     unsigned int ch)
643 {
644 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
645 	struct circ_buf *ring = &atmel_port->rx_ring;
646 	struct atmel_uart_char *c;
647 
648 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
649 		/* Buffer overflow, ignore char */
650 		return;
651 
652 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
653 	c->status	= status;
654 	c->ch		= ch;
655 
656 	/* Make sure the character is stored before we update head. */
657 	smp_wmb();
658 
659 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
660 }
661 
662 /*
663  * Deal with parity, framing and overrun errors.
664  */
665 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
666 {
667 	/* clear error */
668 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
669 
670 	if (status & ATMEL_US_RXBRK) {
671 		/* ignore side-effect */
672 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
673 		port->icount.brk++;
674 	}
675 	if (status & ATMEL_US_PARE)
676 		port->icount.parity++;
677 	if (status & ATMEL_US_FRAME)
678 		port->icount.frame++;
679 	if (status & ATMEL_US_OVRE)
680 		port->icount.overrun++;
681 }
682 
683 /*
684  * Characters received (called from interrupt handler)
685  */
686 static void atmel_rx_chars(struct uart_port *port)
687 {
688 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
689 	unsigned int status, ch;
690 
691 	status = atmel_uart_readl(port, ATMEL_US_CSR);
692 	while (status & ATMEL_US_RXRDY) {
693 		ch = atmel_uart_read_char(port);
694 
695 		/*
696 		 * note that the error handling code is
697 		 * out of the main execution path
698 		 */
699 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
700 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
701 			     || atmel_port->break_active)) {
702 
703 			/* clear error */
704 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
705 
706 			if (status & ATMEL_US_RXBRK
707 			    && !atmel_port->break_active) {
708 				atmel_port->break_active = 1;
709 				atmel_uart_writel(port, ATMEL_US_IER,
710 						  ATMEL_US_RXBRK);
711 			} else {
712 				/*
713 				 * This is either the end-of-break
714 				 * condition or we've received at
715 				 * least one character without RXBRK
716 				 * being set. In both cases, the next
717 				 * RXBRK will indicate start-of-break.
718 				 */
719 				atmel_uart_writel(port, ATMEL_US_IDR,
720 						  ATMEL_US_RXBRK);
721 				status &= ~ATMEL_US_RXBRK;
722 				atmel_port->break_active = 0;
723 			}
724 		}
725 
726 		atmel_buffer_rx_char(port, status, ch);
727 		status = atmel_uart_readl(port, ATMEL_US_CSR);
728 	}
729 
730 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
731 }
732 
733 /*
734  * Transmit characters (called from tasklet with TXRDY interrupt
735  * disabled)
736  */
737 static void atmel_tx_chars(struct uart_port *port)
738 {
739 	struct circ_buf *xmit = &port->state->xmit;
740 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
741 
742 	if (port->x_char &&
743 	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
744 		atmel_uart_write_char(port, port->x_char);
745 		port->icount.tx++;
746 		port->x_char = 0;
747 	}
748 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
749 		return;
750 
751 	while (atmel_uart_readl(port, ATMEL_US_CSR) &
752 	       atmel_port->tx_done_mask) {
753 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
754 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
755 		port->icount.tx++;
756 		if (uart_circ_empty(xmit))
757 			break;
758 	}
759 
760 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
761 		uart_write_wakeup(port);
762 
763 	if (!uart_circ_empty(xmit))
764 		/* Enable interrupts */
765 		atmel_uart_writel(port, ATMEL_US_IER,
766 				  atmel_port->tx_done_mask);
767 }
768 
769 static void atmel_complete_tx_dma(void *arg)
770 {
771 	struct atmel_uart_port *atmel_port = arg;
772 	struct uart_port *port = &atmel_port->uart;
773 	struct circ_buf *xmit = &port->state->xmit;
774 	struct dma_chan *chan = atmel_port->chan_tx;
775 	unsigned long flags;
776 
777 	spin_lock_irqsave(&port->lock, flags);
778 
779 	if (chan)
780 		dmaengine_terminate_all(chan);
781 	xmit->tail += atmel_port->tx_len;
782 	xmit->tail &= UART_XMIT_SIZE - 1;
783 
784 	port->icount.tx += atmel_port->tx_len;
785 
786 	spin_lock_irq(&atmel_port->lock_tx);
787 	async_tx_ack(atmel_port->desc_tx);
788 	atmel_port->cookie_tx = -EINVAL;
789 	atmel_port->desc_tx = NULL;
790 	spin_unlock_irq(&atmel_port->lock_tx);
791 
792 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
793 		uart_write_wakeup(port);
794 
795 	/*
796 	 * xmit is a circular buffer so, if we have just send data from
797 	 * xmit->tail to the end of xmit->buf, now we have to transmit the
798 	 * remaining data from the beginning of xmit->buf to xmit->head.
799 	 */
800 	if (!uart_circ_empty(xmit))
801 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
802 	else if ((port->rs485.flags & SER_RS485_ENABLED) &&
803 		 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
804 		/* DMA done, stop TX, start RX for RS485 */
805 		atmel_start_rx(port);
806 	}
807 
808 	spin_unlock_irqrestore(&port->lock, flags);
809 }
810 
811 static void atmel_release_tx_dma(struct uart_port *port)
812 {
813 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
814 	struct dma_chan *chan = atmel_port->chan_tx;
815 
816 	if (chan) {
817 		dmaengine_terminate_all(chan);
818 		dma_release_channel(chan);
819 		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
820 				DMA_TO_DEVICE);
821 	}
822 
823 	atmel_port->desc_tx = NULL;
824 	atmel_port->chan_tx = NULL;
825 	atmel_port->cookie_tx = -EINVAL;
826 }
827 
828 /*
829  * Called from tasklet with TXRDY interrupt is disabled.
830  */
831 static void atmel_tx_dma(struct uart_port *port)
832 {
833 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
834 	struct circ_buf *xmit = &port->state->xmit;
835 	struct dma_chan *chan = atmel_port->chan_tx;
836 	struct dma_async_tx_descriptor *desc;
837 	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
838 	unsigned int tx_len, part1_len, part2_len, sg_len;
839 	dma_addr_t phys_addr;
840 
841 	/* Make sure we have an idle channel */
842 	if (atmel_port->desc_tx != NULL)
843 		return;
844 
845 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
846 		/*
847 		 * DMA is idle now.
848 		 * Port xmit buffer is already mapped,
849 		 * and it is one page... Just adjust
850 		 * offsets and lengths. Since it is a circular buffer,
851 		 * we have to transmit till the end, and then the rest.
852 		 * Take the port lock to get a
853 		 * consistent xmit buffer state.
854 		 */
855 		tx_len = CIRC_CNT_TO_END(xmit->head,
856 					 xmit->tail,
857 					 UART_XMIT_SIZE);
858 
859 		if (atmel_port->fifo_size) {
860 			/* multi data mode */
861 			part1_len = (tx_len & ~0x3); /* DWORD access */
862 			part2_len = (tx_len & 0x3); /* BYTE access */
863 		} else {
864 			/* single data (legacy) mode */
865 			part1_len = 0;
866 			part2_len = tx_len; /* BYTE access only */
867 		}
868 
869 		sg_init_table(sgl, 2);
870 		sg_len = 0;
871 		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
872 		if (part1_len) {
873 			sg = &sgl[sg_len++];
874 			sg_dma_address(sg) = phys_addr;
875 			sg_dma_len(sg) = part1_len;
876 
877 			phys_addr += part1_len;
878 		}
879 
880 		if (part2_len) {
881 			sg = &sgl[sg_len++];
882 			sg_dma_address(sg) = phys_addr;
883 			sg_dma_len(sg) = part2_len;
884 		}
885 
886 		/*
887 		 * save tx_len so atmel_complete_tx_dma() will increase
888 		 * xmit->tail correctly
889 		 */
890 		atmel_port->tx_len = tx_len;
891 
892 		desc = dmaengine_prep_slave_sg(chan,
893 					       sgl,
894 					       sg_len,
895 					       DMA_MEM_TO_DEV,
896 					       DMA_PREP_INTERRUPT |
897 					       DMA_CTRL_ACK);
898 		if (!desc) {
899 			dev_err(port->dev, "Failed to send via dma!\n");
900 			return;
901 		}
902 
903 		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
904 
905 		atmel_port->desc_tx = desc;
906 		desc->callback = atmel_complete_tx_dma;
907 		desc->callback_param = atmel_port;
908 		atmel_port->cookie_tx = dmaengine_submit(desc);
909 	}
910 
911 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
912 		uart_write_wakeup(port);
913 }
914 
/*
 * Request and configure a slave DMA channel for transmission and map
 * the port's xmit page for device reads. On any failure the port
 * falls back to PIO (use_dma_tx is cleared) and -EINVAL is returned.
 */
static int atmel_prepare_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	dma_cap_mask_t		mask;
	struct dma_slave_config config;
	int ret, nent;

	/*
	 * NOTE(review): 'mask' is initialised but never used below;
	 * dma_request_slave_channel() selects the channel by name.
	 */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
	if (atmel_port->chan_tx == NULL)
		goto chan_err;
	dev_info(port->dev, "using %s for tx DMA transfers\n",
		dma_chan_name(atmel_port->chan_tx));

	spin_lock_init(&atmel_port->lock_tx);
	sg_init_table(&atmel_port->sg_tx, 1);
	/* UART circular tx buffer is an aligned page. */
	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
	sg_set_page(&atmel_port->sg_tx,
			virt_to_page(port->state->xmit.buf),
			UART_XMIT_SIZE,
			offset_in_page(port->state->xmit.buf));
	nent = dma_map_sg(port->dev,
				&atmel_port->sg_tx,
				1,
				DMA_TO_DEVICE);

	if (!nent) {
		dev_dbg(port->dev, "need to release resource of dma\n");
		goto chan_err;
	} else {
		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
			sg_dma_len(&atmel_port->sg_tx),
			port->state->xmit.buf,
			&sg_dma_address(&atmel_port->sg_tx));
	}

	/* Configure the slave DMA */
	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	/* 4-byte writes when the USART FIFO supports multi-data mode */
	config.dst_addr_width = (atmel_port->fifo_size) ?
				DMA_SLAVE_BUSWIDTH_4_BYTES :
				DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr = port->mapbase + ATMEL_US_THR;
	config.dst_maxburst = 1;

	ret = dmaengine_slave_config(atmel_port->chan_tx,
				     &config);
	if (ret) {
		dev_err(port->dev, "DMA tx slave configuration failed\n");
		goto chan_err;
	}

	return 0;

chan_err:
	dev_err(port->dev, "TX channel not available, switch to pio\n");
	atmel_port->use_dma_tx = 0;
	if (atmel_port->chan_tx)
		atmel_release_tx_dma(port);
	return -EINVAL;
}
979 
980 static void atmel_complete_rx_dma(void *arg)
981 {
982 	struct uart_port *port = arg;
983 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
984 
985 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
986 }
987 
988 static void atmel_release_rx_dma(struct uart_port *port)
989 {
990 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
991 	struct dma_chan *chan = atmel_port->chan_rx;
992 
993 	if (chan) {
994 		dmaengine_terminate_all(chan);
995 		dma_release_channel(chan);
996 		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
997 				DMA_FROM_DEVICE);
998 	}
999 
1000 	atmel_port->desc_rx = NULL;
1001 	atmel_port->chan_rx = NULL;
1002 	atmel_port->cookie_rx = -EINVAL;
1003 }
1004 
/*
 * Drain received data from the cyclic RX DMA buffer into the tty
 * layer. Called with port->lock held (it is dropped around
 * tty_flip_buffer_push() and re-taken).
 */
static void atmel_rx_from_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_port *tport = &port->state->port;
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct dma_chan *chan = atmel_port->chan_rx;
	struct dma_tx_state state;
	enum dma_status dmastat;
	size_t count;


	/* Reset the UART timeout early so that we don't miss one */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
	dmastat = dmaengine_tx_status(chan,
				atmel_port->cookie_rx,
				&state);
	/* Restart a new tasklet if DMA status is error */
	if (dmastat == DMA_ERROR) {
		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
		return;
	}

	/* CPU claims ownership of RX DMA buffer */
	dma_sync_sg_for_cpu(port->dev,
			    &atmel_port->sg_rx,
			    1,
			    DMA_FROM_DEVICE);

	/*
	 * ring->head points to the end of data already written by the DMA.
	 * ring->tail points to the beginning of data to be read by the
	 * framework.
	 * The current transfer size should not be larger than the dma buffer
	 * length.
	 */
	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
	/*
	 * At this point ring->head may point to the first byte right after the
	 * last byte of the dma buffer:
	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
	 *
	 * However ring->tail must always points inside the dma buffer:
	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
	 *
	 * Since we use a ring buffer, we have to handle the case
	 * where head is lower than tail. In such a case, we first read from
	 * tail to the end of the buffer then reset tail.
	 */
	if (ring->head < ring->tail) {
		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		ring->tail = 0;
		port->icount.rx += count;
	}

	/* Finally we read data from tail to head */
	if (ring->tail < ring->head) {
		count = ring->head - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		/* Wrap ring->head if needed */
		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
			ring->head = 0;
		ring->tail = ring->head;
		port->icount.rx += count;
	}

	/* USART retrieves ownership of RX DMA buffer */
	dma_sync_sg_for_device(port->dev,
			       &atmel_port->sg_rx,
			       1,
			       DMA_FROM_DEVICE);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	/* Re-arm the receive timeout interrupt for the next batch */
	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}
1092 
1093 static int atmel_prepare_rx_dma(struct uart_port *port)
1094 {
1095 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1096 	struct dma_async_tx_descriptor *desc;
1097 	dma_cap_mask_t		mask;
1098 	struct dma_slave_config config;
1099 	struct circ_buf		*ring;
1100 	int ret, nent;
1101 
1102 	ring = &atmel_port->rx_ring;
1103 
1104 	dma_cap_zero(mask);
1105 	dma_cap_set(DMA_CYCLIC, mask);
1106 
1107 	atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
1108 	if (atmel_port->chan_rx == NULL)
1109 		goto chan_err;
1110 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1111 		dma_chan_name(atmel_port->chan_rx));
1112 
1113 	spin_lock_init(&atmel_port->lock_rx);
1114 	sg_init_table(&atmel_port->sg_rx, 1);
1115 	/* UART circular rx buffer is an aligned page. */
1116 	BUG_ON(!PAGE_ALIGNED(ring->buf));
1117 	sg_set_page(&atmel_port->sg_rx,
1118 		    virt_to_page(ring->buf),
1119 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1120 		    offset_in_page(ring->buf));
1121 	nent = dma_map_sg(port->dev,
1122 			  &atmel_port->sg_rx,
1123 			  1,
1124 			  DMA_FROM_DEVICE);
1125 
1126 	if (!nent) {
1127 		dev_dbg(port->dev, "need to release resource of dma\n");
1128 		goto chan_err;
1129 	} else {
1130 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1131 			sg_dma_len(&atmel_port->sg_rx),
1132 			ring->buf,
1133 			&sg_dma_address(&atmel_port->sg_rx));
1134 	}
1135 
1136 	/* Configure the slave DMA */
1137 	memset(&config, 0, sizeof(config));
1138 	config.direction = DMA_DEV_TO_MEM;
1139 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1140 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1141 	config.src_maxburst = 1;
1142 
1143 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1144 				     &config);
1145 	if (ret) {
1146 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1147 		goto chan_err;
1148 	}
1149 	/*
1150 	 * Prepare a cyclic dma transfer, assign 2 descriptors,
1151 	 * each one is half ring buffer size
1152 	 */
1153 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1154 					 sg_dma_address(&atmel_port->sg_rx),
1155 					 sg_dma_len(&atmel_port->sg_rx),
1156 					 sg_dma_len(&atmel_port->sg_rx)/2,
1157 					 DMA_DEV_TO_MEM,
1158 					 DMA_PREP_INTERRUPT);
1159 	desc->callback = atmel_complete_rx_dma;
1160 	desc->callback_param = port;
1161 	atmel_port->desc_rx = desc;
1162 	atmel_port->cookie_rx = dmaengine_submit(desc);
1163 
1164 	return 0;
1165 
1166 chan_err:
1167 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1168 	atmel_port->use_dma_rx = 0;
1169 	if (atmel_port->chan_rx)
1170 		atmel_release_rx_dma(port);
1171 	return -EINVAL;
1172 }
1173 
1174 static void atmel_uart_timer_callback(struct timer_list *t)
1175 {
1176 	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1177 							uart_timer);
1178 	struct uart_port *port = &atmel_port->uart;
1179 
1180 	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1181 		tasklet_schedule(&atmel_port->tasklet_rx);
1182 		mod_timer(&atmel_port->uart_timer,
1183 			  jiffies + uart_poll_timeout(port));
1184 	}
1185 }
1186 
1187 /*
1188  * receive interrupt handler.
1189  */
1190 static void
1191 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1192 {
1193 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1194 
1195 	if (atmel_use_pdc_rx(port)) {
1196 		/*
1197 		 * PDC receive. Just schedule the tasklet and let it
1198 		 * figure out the details.
1199 		 *
1200 		 * TODO: We're not handling error flags correctly at
1201 		 * the moment.
1202 		 */
1203 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1204 			atmel_uart_writel(port, ATMEL_US_IDR,
1205 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1206 			atmel_tasklet_schedule(atmel_port,
1207 					       &atmel_port->tasklet_rx);
1208 		}
1209 
1210 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1211 				ATMEL_US_FRAME | ATMEL_US_PARE))
1212 			atmel_pdc_rxerr(port, pending);
1213 	}
1214 
1215 	if (atmel_use_dma_rx(port)) {
1216 		if (pending & ATMEL_US_TIMEOUT) {
1217 			atmel_uart_writel(port, ATMEL_US_IDR,
1218 					  ATMEL_US_TIMEOUT);
1219 			atmel_tasklet_schedule(atmel_port,
1220 					       &atmel_port->tasklet_rx);
1221 		}
1222 	}
1223 
1224 	/* Interrupt receive */
1225 	if (pending & ATMEL_US_RXRDY)
1226 		atmel_rx_chars(port);
1227 	else if (pending & ATMEL_US_RXBRK) {
1228 		/*
1229 		 * End of break detected. If it came along with a
1230 		 * character, atmel_rx_chars will handle it.
1231 		 */
1232 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1233 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1234 		atmel_port->break_active = 0;
1235 	}
1236 }
1237 
1238 /*
1239  * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1240  */
1241 static void
1242 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1243 {
1244 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1245 
1246 	if (pending & atmel_port->tx_done_mask) {
1247 		/* Either PDC or interrupt transmission */
1248 		atmel_uart_writel(port, ATMEL_US_IDR,
1249 				  atmel_port->tx_done_mask);
1250 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1251 	}
1252 }
1253 
1254 /*
1255  * status flags interrupt handler.
1256  */
1257 static void
1258 atmel_handle_status(struct uart_port *port, unsigned int pending,
1259 		    unsigned int status)
1260 {
1261 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1262 	unsigned int status_change;
1263 
1264 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1265 				| ATMEL_US_CTSIC)) {
1266 		status_change = status ^ atmel_port->irq_status_prev;
1267 		atmel_port->irq_status_prev = status;
1268 
1269 		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1270 					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1271 			/* TODO: All reads to CSR will clear these interrupts! */
1272 			if (status_change & ATMEL_US_RI)
1273 				port->icount.rng++;
1274 			if (status_change & ATMEL_US_DSR)
1275 				port->icount.dsr++;
1276 			if (status_change & ATMEL_US_DCD)
1277 				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1278 			if (status_change & ATMEL_US_CTS)
1279 				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1280 
1281 			wake_up_interruptible(&port->state->port.delta_msr_wait);
1282 		}
1283 	}
1284 }
1285 
1286 /*
1287  * Interrupt handler
1288  */
1289 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1290 {
1291 	struct uart_port *port = dev_id;
1292 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1293 	unsigned int status, pending, mask, pass_counter = 0;
1294 
1295 	spin_lock(&atmel_port->lock_suspended);
1296 
1297 	do {
1298 		status = atmel_get_lines_status(port);
1299 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1300 		pending = status & mask;
1301 		if (!pending)
1302 			break;
1303 
1304 		if (atmel_port->suspended) {
1305 			atmel_port->pending |= pending;
1306 			atmel_port->pending_status = status;
1307 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1308 			pm_system_wakeup();
1309 			break;
1310 		}
1311 
1312 		atmel_handle_receive(port, pending);
1313 		atmel_handle_status(port, pending, status);
1314 		atmel_handle_transmit(port, pending);
1315 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1316 
1317 	spin_unlock(&atmel_port->lock_suspended);
1318 
1319 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1320 }
1321 
1322 static void atmel_release_tx_pdc(struct uart_port *port)
1323 {
1324 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1325 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1326 
1327 	dma_unmap_single(port->dev,
1328 			 pdc->dma_addr,
1329 			 pdc->dma_size,
1330 			 DMA_TO_DEVICE);
1331 }
1332 
1333 /*
1334  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1335  */
1336 static void atmel_tx_pdc(struct uart_port *port)
1337 {
1338 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1339 	struct circ_buf *xmit = &port->state->xmit;
1340 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1341 	int count;
1342 
1343 	/* nothing left to transmit? */
1344 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1345 		return;
1346 
1347 	xmit->tail += pdc->ofs;
1348 	xmit->tail &= UART_XMIT_SIZE - 1;
1349 
1350 	port->icount.tx += pdc->ofs;
1351 	pdc->ofs = 0;
1352 
1353 	/* more to transmit - setup next transfer */
1354 
1355 	/* disable PDC transmit */
1356 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1357 
1358 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1359 		dma_sync_single_for_device(port->dev,
1360 					   pdc->dma_addr,
1361 					   pdc->dma_size,
1362 					   DMA_TO_DEVICE);
1363 
1364 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1365 		pdc->ofs = count;
1366 
1367 		atmel_uart_writel(port, ATMEL_PDC_TPR,
1368 				  pdc->dma_addr + xmit->tail);
1369 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1370 		/* re-enable PDC transmit */
1371 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1372 		/* Enable interrupts */
1373 		atmel_uart_writel(port, ATMEL_US_IER,
1374 				  atmel_port->tx_done_mask);
1375 	} else {
1376 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
1377 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
1378 			/* DMA done, stop TX, start RX for RS485 */
1379 			atmel_start_rx(port);
1380 		}
1381 	}
1382 
1383 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1384 		uart_write_wakeup(port);
1385 }
1386 
1387 static int atmel_prepare_tx_pdc(struct uart_port *port)
1388 {
1389 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1390 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1391 	struct circ_buf *xmit = &port->state->xmit;
1392 
1393 	pdc->buf = xmit->buf;
1394 	pdc->dma_addr = dma_map_single(port->dev,
1395 					pdc->buf,
1396 					UART_XMIT_SIZE,
1397 					DMA_TO_DEVICE);
1398 	pdc->dma_size = UART_XMIT_SIZE;
1399 	pdc->ofs = 0;
1400 
1401 	return 0;
1402 }
1403 
/*
 * Drain characters queued by the RX interrupt handler into the PIO ring
 * buffer and push them to the tty layer.  Runs from the RX tasklet with
 * port->lock held; only this path advances ring->tail.
 */
static void atmel_rx_from_ring(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	unsigned int flg;
	unsigned int status;

	while (ring->head != ring->tail) {
		struct atmel_uart_char c;

		/* Make sure c is loaded after head. */
		smp_rmb();

		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];

		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);

		port->icount.rx++;
		status = c.status;
		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
			if (status & ATMEL_US_RXBRK) {
				/* ignore side-effect */
				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);

				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}
			if (status & ATMEL_US_PARE)
				port->icount.parity++;
			if (status & ATMEL_US_FRAME)
				port->icount.frame++;
			if (status & ATMEL_US_OVRE)
				port->icount.overrun++;

			/* only report conditions the user asked for */
			status &= port->read_status_mask;

			if (status & ATMEL_US_RXBRK)
				flg = TTY_BREAK;
			else if (status & ATMEL_US_PARE)
				flg = TTY_PARITY;
			else if (status & ATMEL_US_FRAME)
				flg = TTY_FRAME;
		}


		if (uart_handle_sysrq_char(port, c.ch))
			continue;

		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
	}

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&port->state->port);
	spin_lock(&port->lock);
}
1471 
1472 static void atmel_release_rx_pdc(struct uart_port *port)
1473 {
1474 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1475 	int i;
1476 
1477 	for (i = 0; i < 2; i++) {
1478 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1479 
1480 		dma_unmap_single(port->dev,
1481 				 pdc->dma_addr,
1482 				 pdc->dma_size,
1483 				 DMA_FROM_DEVICE);
1484 		kfree(pdc->buf);
1485 	}
1486 }
1487 
/*
 * Drain the two ping-pong PDC receive buffers into the tty layer.
 * Runs from the RX tasklet with port->lock held.  pdc->ofs remembers how
 * far a previous pass got within the current buffer.
 */
static void atmel_rx_from_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_port *tport = &port->state->port;
	struct atmel_dma_buffer *pdc;
	int rx_idx = atmel_port->pdc_rx_idx;
	unsigned int head;
	unsigned int tail;
	unsigned int count;

	do {
		/* Reset the UART timeout early so that we don't miss one */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);

		pdc = &atmel_port->pdc_rx[rx_idx];
		/* RPR holds the next address the PDC will write to */
		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
		tail = pdc->ofs;

		/* If the PDC has switched buffers, RPR won't contain
		 * any address within the current buffer. Since head
		 * is unsigned, we just need a one-way comparison to
		 * find out.
		 *
		 * In this case, we just need to consume the entire
		 * buffer and resubmit it for DMA. This will clear the
		 * ENDRX bit as well, so that we can safely re-enable
		 * all interrupts below.
		 */
		head = min(head, pdc->dma_size);

		if (likely(head != tail)) {
			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);

			/*
			 * head will only wrap around when we recycle
			 * the DMA buffer, and when that happens, we
			 * explicitly set tail to 0. So head will
			 * always be greater than tail.
			 */
			count = head - tail;

			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
						count);

			dma_sync_single_for_device(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);

			port->icount.rx += count;
			pdc->ofs = head;
		}

		/*
		 * If the current buffer is full, we need to check if
		 * the next one contains any additional data.
		 */
		if (head >= pdc->dma_size) {
			/* recycle the drained buffer as the "next" PDC buffer */
			pdc->ofs = 0;
			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);

			rx_idx = !rx_idx;
			atmel_port->pdc_rx_idx = rx_idx;
		}
	} while (head >= pdc->dma_size);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	atmel_uart_writel(port, ATMEL_US_IER,
			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
1565 
1566 static int atmel_prepare_rx_pdc(struct uart_port *port)
1567 {
1568 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1569 	int i;
1570 
1571 	for (i = 0; i < 2; i++) {
1572 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1573 
1574 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1575 		if (pdc->buf == NULL) {
1576 			if (i != 0) {
1577 				dma_unmap_single(port->dev,
1578 					atmel_port->pdc_rx[0].dma_addr,
1579 					PDC_BUFFER_SIZE,
1580 					DMA_FROM_DEVICE);
1581 				kfree(atmel_port->pdc_rx[0].buf);
1582 			}
1583 			atmel_port->use_pdc_rx = 0;
1584 			return -ENOMEM;
1585 		}
1586 		pdc->dma_addr = dma_map_single(port->dev,
1587 						pdc->buf,
1588 						PDC_BUFFER_SIZE,
1589 						DMA_FROM_DEVICE);
1590 		pdc->dma_size = PDC_BUFFER_SIZE;
1591 		pdc->ofs = 0;
1592 	}
1593 
1594 	atmel_port->pdc_rx_idx = 0;
1595 
1596 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1597 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1598 
1599 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1600 			  atmel_port->pdc_rx[1].dma_addr);
1601 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1602 
1603 	return 0;
1604 }
1605 
1606 /*
1607  * tasklet handling tty stuff outside the interrupt handler.
1608  */
1609 static void atmel_tasklet_rx_func(unsigned long data)
1610 {
1611 	struct uart_port *port = (struct uart_port *)data;
1612 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1613 
1614 	/* The interrupt handler does not take the lock */
1615 	spin_lock(&port->lock);
1616 	atmel_port->schedule_rx(port);
1617 	spin_unlock(&port->lock);
1618 }
1619 
1620 static void atmel_tasklet_tx_func(unsigned long data)
1621 {
1622 	struct uart_port *port = (struct uart_port *)data;
1623 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1624 
1625 	/* The interrupt handler does not take the lock */
1626 	spin_lock(&port->lock);
1627 	atmel_port->schedule_tx(port);
1628 	spin_unlock(&port->lock);
1629 }
1630 
1631 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1632 				struct platform_device *pdev)
1633 {
1634 	struct device_node *np = pdev->dev.of_node;
1635 
1636 	/* DMA/PDC usage specification */
1637 	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1638 		if (of_property_read_bool(np, "dmas")) {
1639 			atmel_port->use_dma_rx  = true;
1640 			atmel_port->use_pdc_rx  = false;
1641 		} else {
1642 			atmel_port->use_dma_rx  = false;
1643 			atmel_port->use_pdc_rx  = true;
1644 		}
1645 	} else {
1646 		atmel_port->use_dma_rx  = false;
1647 		atmel_port->use_pdc_rx  = false;
1648 	}
1649 
1650 	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1651 		if (of_property_read_bool(np, "dmas")) {
1652 			atmel_port->use_dma_tx  = true;
1653 			atmel_port->use_pdc_tx  = false;
1654 		} else {
1655 			atmel_port->use_dma_tx  = false;
1656 			atmel_port->use_pdc_tx  = true;
1657 		}
1658 	} else {
1659 		atmel_port->use_dma_tx  = false;
1660 		atmel_port->use_pdc_tx  = false;
1661 	}
1662 }
1663 
1664 static void atmel_set_ops(struct uart_port *port)
1665 {
1666 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1667 
1668 	if (atmel_use_dma_rx(port)) {
1669 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1670 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1671 		atmel_port->release_rx = &atmel_release_rx_dma;
1672 	} else if (atmel_use_pdc_rx(port)) {
1673 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1674 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1675 		atmel_port->release_rx = &atmel_release_rx_pdc;
1676 	} else {
1677 		atmel_port->prepare_rx = NULL;
1678 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1679 		atmel_port->release_rx = NULL;
1680 	}
1681 
1682 	if (atmel_use_dma_tx(port)) {
1683 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1684 		atmel_port->schedule_tx = &atmel_tx_dma;
1685 		atmel_port->release_tx = &atmel_release_tx_dma;
1686 	} else if (atmel_use_pdc_tx(port)) {
1687 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1688 		atmel_port->schedule_tx = &atmel_tx_pdc;
1689 		atmel_port->release_tx = &atmel_release_tx_pdc;
1690 	} else {
1691 		atmel_port->prepare_tx = NULL;
1692 		atmel_port->schedule_tx = &atmel_tx_chars;
1693 		atmel_port->release_tx = NULL;
1694 	}
1695 }
1696 
1697 /*
1698  * Get ip name usart or uart
1699  */
1700 static void atmel_get_ip_name(struct uart_port *port)
1701 {
1702 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1703 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1704 	u32 version;
1705 	u32 usart, dbgu_uart, new_uart;
1706 	/* ASCII decoding for IP version */
1707 	usart = 0x55534152;	/* USAR(T) */
1708 	dbgu_uart = 0x44424755;	/* DBGU */
1709 	new_uart = 0x55415254;	/* UART */
1710 
1711 	/*
1712 	 * Only USART devices from at91sam9260 SOC implement fractional
1713 	 * baudrate. It is available for all asynchronous modes, with the
1714 	 * following restriction: the sampling clock's duty cycle is not
1715 	 * constant.
1716 	 */
1717 	atmel_port->has_frac_baudrate = false;
1718 	atmel_port->has_hw_timer = false;
1719 
1720 	if (name == new_uart) {
1721 		dev_dbg(port->dev, "Uart with hw timer");
1722 		atmel_port->has_hw_timer = true;
1723 		atmel_port->rtor = ATMEL_UA_RTOR;
1724 	} else if (name == usart) {
1725 		dev_dbg(port->dev, "Usart\n");
1726 		atmel_port->has_frac_baudrate = true;
1727 		atmel_port->has_hw_timer = true;
1728 		atmel_port->rtor = ATMEL_US_RTOR;
1729 	} else if (name == dbgu_uart) {
1730 		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1731 	} else {
1732 		/* fallback for older SoCs: use version field */
1733 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1734 		switch (version) {
1735 		case 0x302:
1736 		case 0x10213:
1737 		case 0x10302:
1738 			dev_dbg(port->dev, "This version is usart\n");
1739 			atmel_port->has_frac_baudrate = true;
1740 			atmel_port->has_hw_timer = true;
1741 			atmel_port->rtor = ATMEL_US_RTOR;
1742 			break;
1743 		case 0x203:
1744 		case 0x10202:
1745 			dev_dbg(port->dev, "This version is uart\n");
1746 			break;
1747 		default:
1748 			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1749 		}
1750 	}
1751 }
1752 
1753 /*
1754  * Perform initialization and enable port for reception
1755  */
1756 static int atmel_startup(struct uart_port *port)
1757 {
1758 	struct platform_device *pdev = to_platform_device(port->dev);
1759 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1760 	int retval;
1761 
1762 	/*
1763 	 * Ensure that no interrupts are enabled otherwise when
1764 	 * request_irq() is called we could get stuck trying to
1765 	 * handle an unexpected interrupt
1766 	 */
1767 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1768 	atmel_port->ms_irq_enabled = false;
1769 
1770 	/*
1771 	 * Allocate the IRQ
1772 	 */
1773 	retval = request_irq(port->irq, atmel_interrupt,
1774 			     IRQF_SHARED | IRQF_COND_SUSPEND,
1775 			     dev_name(&pdev->dev), port);
1776 	if (retval) {
1777 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1778 		return retval;
1779 	}
1780 
1781 	atomic_set(&atmel_port->tasklet_shutdown, 0);
1782 	tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
1783 			(unsigned long)port);
1784 	tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
1785 			(unsigned long)port);
1786 
1787 	/*
1788 	 * Initialize DMA (if necessary)
1789 	 */
1790 	atmel_init_property(atmel_port, pdev);
1791 	atmel_set_ops(port);
1792 
1793 	if (atmel_port->prepare_rx) {
1794 		retval = atmel_port->prepare_rx(port);
1795 		if (retval < 0)
1796 			atmel_set_ops(port);
1797 	}
1798 
1799 	if (atmel_port->prepare_tx) {
1800 		retval = atmel_port->prepare_tx(port);
1801 		if (retval < 0)
1802 			atmel_set_ops(port);
1803 	}
1804 
1805 	/*
1806 	 * Enable FIFO when available
1807 	 */
1808 	if (atmel_port->fifo_size) {
1809 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1810 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1811 		unsigned int fmr;
1812 
1813 		atmel_uart_writel(port, ATMEL_US_CR,
1814 				  ATMEL_US_FIFOEN |
1815 				  ATMEL_US_RXFCLR |
1816 				  ATMEL_US_TXFLCLR);
1817 
1818 		if (atmel_use_dma_tx(port))
1819 			txrdym = ATMEL_US_FOUR_DATA;
1820 
1821 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1822 		if (atmel_port->rts_high &&
1823 		    atmel_port->rts_low)
1824 			fmr |=	ATMEL_US_FRTSC |
1825 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1826 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1827 
1828 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1829 	}
1830 
1831 	/* Save current CSR for comparison in atmel_tasklet_func() */
1832 	atmel_port->irq_status_prev = atmel_get_lines_status(port);
1833 
1834 	/*
1835 	 * Finally, enable the serial port
1836 	 */
1837 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1838 	/* enable xmit & rcvr */
1839 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1840 	atmel_port->tx_stopped = false;
1841 
1842 	timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1843 
1844 	if (atmel_use_pdc_rx(port)) {
1845 		/* set UART timeout */
1846 		if (!atmel_port->has_hw_timer) {
1847 			mod_timer(&atmel_port->uart_timer,
1848 					jiffies + uart_poll_timeout(port));
1849 		/* set USART timeout */
1850 		} else {
1851 			atmel_uart_writel(port, atmel_port->rtor,
1852 					  PDC_RX_TIMEOUT);
1853 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1854 
1855 			atmel_uart_writel(port, ATMEL_US_IER,
1856 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1857 		}
1858 		/* enable PDC controller */
1859 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1860 	} else if (atmel_use_dma_rx(port)) {
1861 		/* set UART timeout */
1862 		if (!atmel_port->has_hw_timer) {
1863 			mod_timer(&atmel_port->uart_timer,
1864 					jiffies + uart_poll_timeout(port));
1865 		/* set USART timeout */
1866 		} else {
1867 			atmel_uart_writel(port, atmel_port->rtor,
1868 					  PDC_RX_TIMEOUT);
1869 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1870 
1871 			atmel_uart_writel(port, ATMEL_US_IER,
1872 					  ATMEL_US_TIMEOUT);
1873 		}
1874 	} else {
1875 		/* enable receive only */
1876 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 /*
1883  * Flush any TX data submitted for DMA. Called when the TX circular
1884  * buffer is reset.
1885  */
1886 static void atmel_flush_buffer(struct uart_port *port)
1887 {
1888 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1889 
1890 	if (atmel_use_pdc_tx(port)) {
1891 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1892 		atmel_port->pdc_tx.ofs = 0;
1893 	}
1894 	/*
1895 	 * in uart_flush_buffer(), the xmit circular buffer has just
1896 	 * been cleared, so we have to reset tx_len accordingly.
1897 	 */
1898 	atmel_port->tx_len = 0;
1899 }
1900 
1901 /*
1902  * Disable the port
1903  */
1904 static void atmel_shutdown(struct uart_port *port)
1905 {
1906 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1907 
1908 	/* Disable modem control lines interrupts */
1909 	atmel_disable_ms(port);
1910 
1911 	/* Disable interrupts at device level */
1912 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1913 
1914 	/* Prevent spurious interrupts from scheduling the tasklet */
1915 	atomic_inc(&atmel_port->tasklet_shutdown);
1916 
1917 	/*
1918 	 * Prevent any tasklets being scheduled during
1919 	 * cleanup
1920 	 */
1921 	del_timer_sync(&atmel_port->uart_timer);
1922 
1923 	/* Make sure that no interrupt is on the fly */
1924 	synchronize_irq(port->irq);
1925 
1926 	/*
1927 	 * Clear out any scheduled tasklets before
1928 	 * we destroy the buffers
1929 	 */
1930 	tasklet_kill(&atmel_port->tasklet_rx);
1931 	tasklet_kill(&atmel_port->tasklet_tx);
1932 
1933 	/*
1934 	 * Ensure everything is stopped and
1935 	 * disable port and break condition.
1936 	 */
1937 	atmel_stop_rx(port);
1938 	atmel_stop_tx(port);
1939 
1940 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1941 
1942 	/*
1943 	 * Shut-down the DMA.
1944 	 */
1945 	if (atmel_port->release_rx)
1946 		atmel_port->release_rx(port);
1947 	if (atmel_port->release_tx)
1948 		atmel_port->release_tx(port);
1949 
1950 	/*
1951 	 * Reset ring buffer pointers
1952 	 */
1953 	atmel_port->rx_ring.head = 0;
1954 	atmel_port->rx_ring.tail = 0;
1955 
1956 	/*
1957 	 * Free the interrupts
1958 	 */
1959 	free_irq(port->irq, port);
1960 
1961 	atmel_flush_buffer(port);
1962 }
1963 
1964 /*
1965  * Power / Clock management.
1966  */
1967 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
1968 			    unsigned int oldstate)
1969 {
1970 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1971 
1972 	switch (state) {
1973 	case 0:
1974 		/*
1975 		 * Enable the peripheral clock for this serial port.
1976 		 * This is called on uart_open() or a resume event.
1977 		 */
1978 		clk_prepare_enable(atmel_port->clk);
1979 
1980 		/* re-enable interrupts if we disabled some on suspend */
1981 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
1982 		break;
1983 	case 3:
1984 		/* Back up the interrupt mask and disable all interrupts */
1985 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
1986 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
1987 
1988 		/*
1989 		 * Disable the peripheral clock for this serial port.
1990 		 * This is called on uart_close() or a suspend event.
1991 		 */
1992 		clk_disable_unprepare(atmel_port->clk);
1993 		break;
1994 	default:
1995 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
1996 	}
1997 }
1998 
1999 /*
2000  * Change the port parameters
2001  */
2002 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2003 			      struct ktermios *old)
2004 {
2005 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2006 	unsigned long flags;
2007 	unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
2008 
2009 	/* save the current mode register */
2010 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2011 
2012 	/* reset the mode, clock divisor, parity, stop bits and data size */
2013 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2014 		  ATMEL_US_PAR | ATMEL_US_USMODE);
2015 
2016 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2017 
2018 	/* byte size */
2019 	switch (termios->c_cflag & CSIZE) {
2020 	case CS5:
2021 		mode |= ATMEL_US_CHRL_5;
2022 		break;
2023 	case CS6:
2024 		mode |= ATMEL_US_CHRL_6;
2025 		break;
2026 	case CS7:
2027 		mode |= ATMEL_US_CHRL_7;
2028 		break;
2029 	default:
2030 		mode |= ATMEL_US_CHRL_8;
2031 		break;
2032 	}
2033 
2034 	/* stop bits */
2035 	if (termios->c_cflag & CSTOPB)
2036 		mode |= ATMEL_US_NBSTOP_2;
2037 
2038 	/* parity */
2039 	if (termios->c_cflag & PARENB) {
2040 		/* Mark or Space parity */
2041 		if (termios->c_cflag & CMSPAR) {
2042 			if (termios->c_cflag & PARODD)
2043 				mode |= ATMEL_US_PAR_MARK;
2044 			else
2045 				mode |= ATMEL_US_PAR_SPACE;
2046 		} else if (termios->c_cflag & PARODD)
2047 			mode |= ATMEL_US_PAR_ODD;
2048 		else
2049 			mode |= ATMEL_US_PAR_EVEN;
2050 	} else
2051 		mode |= ATMEL_US_PAR_NONE;
2052 
2053 	spin_lock_irqsave(&port->lock, flags);
2054 
2055 	port->read_status_mask = ATMEL_US_OVRE;
2056 	if (termios->c_iflag & INPCK)
2057 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2058 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2059 		port->read_status_mask |= ATMEL_US_RXBRK;
2060 
2061 	if (atmel_use_pdc_rx(port))
2062 		/* need to enable error interrupts */
2063 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2064 
2065 	/*
2066 	 * Characters to ignore
2067 	 */
2068 	port->ignore_status_mask = 0;
2069 	if (termios->c_iflag & IGNPAR)
2070 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2071 	if (termios->c_iflag & IGNBRK) {
2072 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2073 		/*
2074 		 * If we're ignoring parity and break indicators,
2075 		 * ignore overruns too (for real raw support).
2076 		 */
2077 		if (termios->c_iflag & IGNPAR)
2078 			port->ignore_status_mask |= ATMEL_US_OVRE;
2079 	}
2080 	/* TODO: Ignore all characters if CREAD is set.*/
2081 
2082 	/* update the per-port timeout */
2083 	uart_update_timeout(port, termios->c_cflag, baud);
2084 
2085 	/*
2086 	 * save/disable interrupts. The tty layer will ensure that the
2087 	 * transmitter is empty if requested by the caller, so there's
2088 	 * no need to wait for it here.
2089 	 */
2090 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2091 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2092 
2093 	/* disable receiver and transmitter */
2094 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2095 	atmel_port->tx_stopped = true;
2096 
2097 	/* mode */
2098 	if (port->rs485.flags & SER_RS485_ENABLED) {
2099 		atmel_uart_writel(port, ATMEL_US_TTGR,
2100 				  port->rs485.delay_rts_after_send);
2101 		mode |= ATMEL_US_USMODE_RS485;
2102 	} else if (termios->c_cflag & CRTSCTS) {
2103 		/* RS232 with hardware handshake (RTS/CTS) */
2104 		if (atmel_use_fifo(port) &&
2105 		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2106 			/*
2107 			 * with ATMEL_US_USMODE_HWHS set, the controller will
2108 			 * be able to drive the RTS pin high/low when the RX
2109 			 * FIFO is above RXFTHRES/below RXFTHRES2.
2110 			 * It will also disable the transmitter when the CTS
2111 			 * pin is high.
2112 			 * This mode is not activated if CTS pin is a GPIO
2113 			 * because in this case, the transmitter is always
2114 			 * disabled (there must be an internal pull-up
2115 			 * responsible for this behaviour).
2116 			 * If the RTS pin is a GPIO, the controller won't be
2117 			 * able to drive it according to the FIFO thresholds,
2118 			 * but it will be handled by the driver.
2119 			 */
2120 			mode |= ATMEL_US_USMODE_HWHS;
2121 		} else {
2122 			/*
2123 			 * For platforms without FIFO, the flow control is
2124 			 * handled by the driver.
2125 			 */
2126 			mode |= ATMEL_US_USMODE_NORMAL;
2127 		}
2128 	} else {
2129 		/* RS232 without hadware handshake */
2130 		mode |= ATMEL_US_USMODE_NORMAL;
2131 	}
2132 
2133 	/* set the mode, clock divisor, parity, stop bits and data size */
2134 	atmel_uart_writel(port, ATMEL_US_MR, mode);
2135 
2136 	/*
2137 	 * when switching the mode, set the RTS line state according to the
2138 	 * new mode, otherwise keep the former state
2139 	 */
2140 	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2141 		unsigned int rts_state;
2142 
2143 		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2144 			/* let the hardware control the RTS line */
2145 			rts_state = ATMEL_US_RTSDIS;
2146 		} else {
2147 			/* force RTS line to low level */
2148 			rts_state = ATMEL_US_RTSEN;
2149 		}
2150 
2151 		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2152 	}
2153 
2154 	/*
2155 	 * Set the baud rate:
2156 	 * Fractional baudrate allows to setup output frequency more
2157 	 * accurately. This feature is enabled only when using normal mode.
2158 	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2159 	 * Currently, OVER is always set to 0 so we get
2160 	 * baudrate = selected clock / (16 * (CD + FP / 8))
2161 	 * then
2162 	 * 8 CD + FP = selected clock / (2 * baudrate)
2163 	 */
2164 	if (atmel_port->has_frac_baudrate) {
2165 		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2166 		cd = div >> 3;
2167 		fp = div & ATMEL_US_FP_MASK;
2168 	} else {
2169 		cd = uart_get_divisor(port, baud);
2170 	}
2171 
2172 	if (cd > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
2173 		cd /= 8;
2174 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2175 	}
2176 	quot = cd | fp << ATMEL_US_FP_OFFSET;
2177 
2178 	atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2179 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2180 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2181 	atmel_port->tx_stopped = false;
2182 
2183 	/* restore interrupts */
2184 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2185 
2186 	/* CTS flow-control and modem-status interrupts */
2187 	if (UART_ENABLE_MS(port, termios->c_cflag))
2188 		atmel_enable_ms(port);
2189 	else
2190 		atmel_disable_ms(port);
2191 
2192 	spin_unlock_irqrestore(&port->lock, flags);
2193 }
2194 
2195 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2196 {
2197 	if (termios->c_line == N_PPS) {
2198 		port->flags |= UPF_HARDPPS_CD;
2199 		spin_lock_irq(&port->lock);
2200 		atmel_enable_ms(port);
2201 		spin_unlock_irq(&port->lock);
2202 	} else {
2203 		port->flags &= ~UPF_HARDPPS_CD;
2204 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2205 			spin_lock_irq(&port->lock);
2206 			atmel_disable_ms(port);
2207 			spin_unlock_irq(&port->lock);
2208 		}
2209 	}
2210 }
2211 
2212 /*
2213  * Return string describing the specified port
2214  */
2215 static const char *atmel_type(struct uart_port *port)
2216 {
2217 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2218 }
2219 
2220 /*
2221  * Release the memory region(s) being used by 'port'.
2222  */
2223 static void atmel_release_port(struct uart_port *port)
2224 {
2225 	struct platform_device *pdev = to_platform_device(port->dev);
2226 	int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2227 
2228 	release_mem_region(port->mapbase, size);
2229 
2230 	if (port->flags & UPF_IOREMAP) {
2231 		iounmap(port->membase);
2232 		port->membase = NULL;
2233 	}
2234 }
2235 
2236 /*
2237  * Request the memory region(s) being used by 'port'.
2238  */
2239 static int atmel_request_port(struct uart_port *port)
2240 {
2241 	struct platform_device *pdev = to_platform_device(port->dev);
2242 	int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2243 
2244 	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2245 		return -EBUSY;
2246 
2247 	if (port->flags & UPF_IOREMAP) {
2248 		port->membase = ioremap(port->mapbase, size);
2249 		if (port->membase == NULL) {
2250 			release_mem_region(port->mapbase, size);
2251 			return -ENOMEM;
2252 		}
2253 	}
2254 
2255 	return 0;
2256 }
2257 
2258 /*
2259  * Configure/autoconfigure the port.
2260  */
2261 static void atmel_config_port(struct uart_port *port, int flags)
2262 {
2263 	if (flags & UART_CONFIG_TYPE) {
2264 		port->type = PORT_ATMEL;
2265 		atmel_request_port(port);
2266 	}
2267 }
2268 
2269 /*
2270  * Verify the new serial_struct (for TIOCSSERIAL).
2271  */
2272 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2273 {
2274 	int ret = 0;
2275 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2276 		ret = -EINVAL;
2277 	if (port->irq != ser->irq)
2278 		ret = -EINVAL;
2279 	if (ser->io_type != SERIAL_IO_MEM)
2280 		ret = -EINVAL;
2281 	if (port->uartclk / 16 != ser->baud_base)
2282 		ret = -EINVAL;
2283 	if (port->mapbase != (unsigned long)ser->iomem_base)
2284 		ret = -EINVAL;
2285 	if (port->iobase != ser->port)
2286 		ret = -EINVAL;
2287 	if (ser->hub6 != 0)
2288 		ret = -EINVAL;
2289 	return ret;
2290 }
2291 
2292 #ifdef CONFIG_CONSOLE_POLL
2293 static int atmel_poll_get_char(struct uart_port *port)
2294 {
2295 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2296 		cpu_relax();
2297 
2298 	return atmel_uart_read_char(port);
2299 }
2300 
2301 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2302 {
2303 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2304 		cpu_relax();
2305 
2306 	atmel_uart_write_char(port, ch);
2307 }
2308 #endif
2309 
/*
 * uart_ops implementing the serial-core contract for this driver;
 * installed on each port by atmel_init_port().
 */
static const struct uart_ops atmel_pops = {
	.tx_empty	= atmel_tx_empty,
	.set_mctrl	= atmel_set_mctrl,
	.get_mctrl	= atmel_get_mctrl,
	.stop_tx	= atmel_stop_tx,
	.start_tx	= atmel_start_tx,
	.stop_rx	= atmel_stop_rx,
	.enable_ms	= atmel_enable_ms,
	.break_ctl	= atmel_break_ctl,
	.startup	= atmel_startup,
	.shutdown	= atmel_shutdown,
	.flush_buffer	= atmel_flush_buffer,
	.set_termios	= atmel_set_termios,
	.set_ldisc	= atmel_set_ldisc,
	.type		= atmel_type,
	.release_port	= atmel_release_port,
	.request_port	= atmel_request_port,
	.config_port	= atmel_config_port,
	.verify_port	= atmel_verify_port,
	.pm		= atmel_serial_pm,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= atmel_poll_get_char,
	.poll_put_char	= atmel_poll_put_char,
#endif
};
2335 
2336 /*
2337  * Configure the port from the platform device resource info.
2338  */
2339 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2340 				      struct platform_device *pdev)
2341 {
2342 	int ret;
2343 	struct uart_port *port = &atmel_port->uart;
2344 
2345 	atmel_init_property(atmel_port, pdev);
2346 	atmel_set_ops(port);
2347 
2348 	uart_get_rs485_mode(&pdev->dev, &port->rs485);
2349 
2350 	port->iotype		= UPIO_MEM;
2351 	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2352 	port->ops		= &atmel_pops;
2353 	port->fifosize		= 1;
2354 	port->dev		= &pdev->dev;
2355 	port->mapbase	= pdev->resource[0].start;
2356 	port->irq	= pdev->resource[1].start;
2357 	port->rs485_config	= atmel_config_rs485;
2358 	port->membase	= NULL;
2359 
2360 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2361 
2362 	/* for console, the clock could already be configured */
2363 	if (!atmel_port->clk) {
2364 		atmel_port->clk = clk_get(&pdev->dev, "usart");
2365 		if (IS_ERR(atmel_port->clk)) {
2366 			ret = PTR_ERR(atmel_port->clk);
2367 			atmel_port->clk = NULL;
2368 			return ret;
2369 		}
2370 		ret = clk_prepare_enable(atmel_port->clk);
2371 		if (ret) {
2372 			clk_put(atmel_port->clk);
2373 			atmel_port->clk = NULL;
2374 			return ret;
2375 		}
2376 		port->uartclk = clk_get_rate(atmel_port->clk);
2377 		clk_disable_unprepare(atmel_port->clk);
2378 		/* only enable clock when USART is in use */
2379 	}
2380 
2381 	/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
2382 	if (port->rs485.flags & SER_RS485_ENABLED)
2383 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2384 	else if (atmel_use_pdc_tx(port)) {
2385 		port->fifosize = PDC_BUFFER_SIZE;
2386 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2387 	} else {
2388 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2389 	}
2390 
2391 	return 0;
2392 }
2393 
2394 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2395 static void atmel_console_putchar(struct uart_port *port, int ch)
2396 {
2397 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2398 		cpu_relax();
2399 	atmel_uart_write_char(port, ch);
2400 }
2401 
2402 /*
2403  * Interrupts are disabled on entering
2404  */
2405 static void atmel_console_write(struct console *co, const char *s, u_int count)
2406 {
2407 	struct uart_port *port = &atmel_ports[co->index].uart;
2408 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2409 	unsigned int status, imr;
2410 	unsigned int pdc_tx;
2411 
2412 	/*
2413 	 * First, save IMR and then disable interrupts
2414 	 */
2415 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2416 	atmel_uart_writel(port, ATMEL_US_IDR,
2417 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2418 
2419 	/* Store PDC transmit status and disable it */
2420 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2421 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2422 
2423 	/* Make sure that tx path is actually able to send characters */
2424 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2425 	atmel_port->tx_stopped = false;
2426 
2427 	uart_console_write(port, s, count, atmel_console_putchar);
2428 
2429 	/*
2430 	 * Finally, wait for transmitter to become empty
2431 	 * and restore IMR
2432 	 */
2433 	do {
2434 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2435 	} while (!(status & ATMEL_US_TXRDY));
2436 
2437 	/* Restore PDC transmit status */
2438 	if (pdc_tx)
2439 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2440 
2441 	/* set interrupts back the way they were */
2442 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2443 }
2444 
2445 /*
2446  * If the port was already initialised (eg, by a boot loader),
2447  * try to determine the current setup.
2448  */
2449 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2450 					     int *parity, int *bits)
2451 {
2452 	unsigned int mr, quot;
2453 
2454 	/*
2455 	 * If the baud rate generator isn't running, the port wasn't
2456 	 * initialized by the boot loader.
2457 	 */
2458 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2459 	if (!quot)
2460 		return;
2461 
2462 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2463 	if (mr == ATMEL_US_CHRL_8)
2464 		*bits = 8;
2465 	else
2466 		*bits = 7;
2467 
2468 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2469 	if (mr == ATMEL_US_PAR_EVEN)
2470 		*parity = 'e';
2471 	else if (mr == ATMEL_US_PAR_ODD)
2472 		*parity = 'o';
2473 
2474 	/*
2475 	 * The serial core only rounds down when matching this to a
2476 	 * supported baud rate. Make sure we don't end up slightly
2477 	 * lower than one of those, as it would make us fall through
2478 	 * to a much lower baud rate than we really want.
2479 	 */
2480 	*baud = port->uartclk / (16 * (quot - 1));
2481 }
2482 
/*
 * Set up the port referenced by co->index as the system console,
 * parsing "options" when given or probing the boot loader's settings
 * otherwise.  Returns -ENODEV if the port is not mapped yet (setup is
 * retried later by the console core).
 */
static int __init atmel_console_setup(struct console *co, char *options)
{
	int ret;
	struct uart_port *port = &atmel_ports[co->index].uart;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (port->membase == NULL) {
		/* Port not initialized yet - delay setup */
		return -ENODEV;
	}

	/*
	 * The clock is left enabled here; atmel_serial_probe() balances
	 * this with a clk_disable_unprepare() once the port is registered.
	 */
	ret = clk_prepare_enable(atmel_ports[co->index].clk);
	if (ret)
		return ret;

	/* mask all interrupts, reset status/receiver, enable RX and TX */
	atmel_uart_writel(port, ATMEL_US_IDR, -1);
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
	atmel_port->tx_stopped = false;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		atmel_console_get_options(port, &baud, &parity, &bits);

	return uart_set_options(port, co, baud, parity, bits, flow);
}
2514 
2515 static struct uart_driver atmel_uart;
2516 
/* console descriptor tying atmel_console_write/setup to the atmel_uart
 * driver (.data); index -1 means "match by name/alias"
 */
static struct console atmel_console = {
	.name		= ATMEL_DEVICENAME,
	.write		= atmel_console_write,
	.device		= uart_console_device,
	.setup		= atmel_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &atmel_uart,
};
2526 
#define ATMEL_CONSOLE_DEVICE	(&atmel_console)

/* true iff 'port' is the line currently acting as system console */
static inline bool atmel_is_console_port(struct uart_port *port)
{
	return port->cons && port->cons->index == port->line;
}

#else
#define ATMEL_CONSOLE_DEVICE	NULL

/* no console support compiled in: never a console port */
static inline bool atmel_is_console_port(struct uart_port *port)
{
	return false;
}
#endif
2542 
/* uart_driver instance shared by all ports, up to ATMEL_MAX_UART lines */
static struct uart_driver atmel_uart = {
	.owner		= THIS_MODULE,
	.driver_name	= "atmel_serial",
	.dev_name	= ATMEL_DEVICENAME,
	.major		= SERIAL_ATMEL_MAJOR,
	.minor		= MINOR_START,
	.nr		= ATMEL_MAX_UART,
	.cons		= ATMEL_CONSOLE_DEVICE,
};
2552 
2553 #ifdef CONFIG_PM
/*
 * Will the serial clock be stopped while suspended?  Only AT91's
 * slow-clock suspend stops it; on other platforms assume it keeps
 * running.
 */
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
	return at91_suspend_entering_slow_clock();
#else
	return false;
#endif
}
2562 
/*
 * Suspend one port.  A suspendable console is drained first; a console
 * kept alive across suspend has its registers cached so they can be
 * replayed by atmel_serial_resume().  When the serial clock will stop,
 * the port is flagged suspended (events recorded meanwhile are replayed
 * on resume) and wakeup is disabled.
 */
static int atmel_serial_suspend(struct platform_device *pdev,
				pm_message_t state)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_is_console_port(port) && console_suspend_enabled) {
		/* Drain the TX shifter */
		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
			 ATMEL_US_TXEMPTY))
			cpu_relax();
	}

	if (atmel_is_console_port(port) && !console_suspend_enabled) {
		/* Cache register values as we won't get a full shutdown/startup
		 * cycle
		 */
		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
		atmel_port->cache.rtor = atmel_uart_readl(port,
							  atmel_port->rtor);
		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
	}

	/* we can not wake up if we're running on slow clock */
	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
	if (atmel_serial_clk_will_stop()) {
		unsigned long flags;

		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
		atmel_port->suspended = true;
		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
		device_set_wakeup_enable(&pdev->dev, 0);
	}

	uart_suspend_port(&atmel_uart, port);

	return 0;
}
2605 
/*
 * Resume one port: restore the registers cached by atmel_serial_suspend()
 * for a console that stayed alive across suspend, replay any interrupt
 * events recorded while suspended, then hand the port back to the serial
 * core and re-enable wakeup.
 */
static int atmel_serial_resume(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned long flags;

	if (atmel_is_console_port(port) && !console_suspend_enabled) {
		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
		atmel_uart_writel(port, atmel_port->rtor,
				  atmel_port->cache.rtor);
		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);

		/* FIFO ports: re-enable, clear both FIFOs, restore mode/irqs */
		if (atmel_port->fifo_size) {
			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
			atmel_uart_writel(port, ATMEL_US_FMR,
					  atmel_port->cache.fmr);
			atmel_uart_writel(port, ATMEL_US_FIER,
					  atmel_port->cache.fimr);
		}
		atmel_start_rx(port);
	}

	/* replay events that arrived while atmel_port->suspended was set */
	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
	if (atmel_port->pending) {
		atmel_handle_receive(port, atmel_port->pending);
		atmel_handle_status(port, atmel_port->pending,
				    atmel_port->pending_status);
		atmel_handle_transmit(port, atmel_port->pending);
		atmel_port->pending = 0;
	}
	atmel_port->suspended = false;
	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);

	uart_resume_port(&atmel_uart, port);
	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);

	return 0;
}
2647 #else
2648 #define atmel_serial_suspend NULL
2649 #define atmel_serial_resume NULL
2650 #endif
2651 
2652 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2653 				     struct platform_device *pdev)
2654 {
2655 	atmel_port->fifo_size = 0;
2656 	atmel_port->rts_low = 0;
2657 	atmel_port->rts_high = 0;
2658 
2659 	if (of_property_read_u32(pdev->dev.of_node,
2660 				 "atmel,fifo-size",
2661 				 &atmel_port->fifo_size))
2662 		return;
2663 
2664 	if (!atmel_port->fifo_size)
2665 		return;
2666 
2667 	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2668 		atmel_port->fifo_size = 0;
2669 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2670 		return;
2671 	}
2672 
2673 	/*
2674 	 * 0 <= rts_low <= rts_high <= fifo_size
2675 	 * Once their CTS line asserted by the remote peer, some x86 UARTs tend
2676 	 * to flush their internal TX FIFO, commonly up to 16 data, before
2677 	 * actually stopping to send new data. So we try to set the RTS High
2678 	 * Threshold to a reasonably high value respecting this 16 data
2679 	 * empirical rule when possible.
2680 	 */
2681 	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2682 			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2683 	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2684 			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
2685 
2686 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2687 		 atmel_port->fifo_size);
2688 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2689 		atmel_port->rts_high);
2690 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2691 		atmel_port->rts_low);
2692 }
2693 
/*
 * Probe one USART/UART instance: pick a line number (DT "serial" alias
 * or first free slot), initialise the port, allocate the RX ring when
 * PDC receive is not used, and register the port with the serial core.
 * All acquired resources are unwound on the goto-cleanup ladder below.
 */
static int atmel_serial_probe(struct platform_device *pdev)
{
	struct atmel_uart_port *atmel_port;
	struct device_node *np = pdev->dev.of_node;
	void *data;
	int ret = -ENODEV;
	bool rs485_enabled;

	/* the RX ring size must be a power of two */
	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));

	ret = of_alias_get_id(np, "serial");
	if (ret < 0)
		/* port id not found in platform data nor device-tree aliases:
		 * auto-enumerate it */
		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);

	if (ret >= ATMEL_MAX_UART) {
		ret = -ENODEV;
		goto err;
	}

	if (test_and_set_bit(ret, atmel_ports_in_use)) {
		/* port already in use */
		ret = -EBUSY;
		goto err;
	}

	atmel_port = &atmel_ports[ret];
	atmel_port->backup_imr = 0;
	atmel_port->uart.line = ret;
	atmel_serial_probe_fifos(atmel_port, pdev);

	atomic_set(&atmel_port->tasklet_shutdown, 0);
	spin_lock_init(&atmel_port->lock_suspended);

	ret = atmel_init_port(atmel_port, pdev);
	if (ret)
		goto err_clear_bit;

	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
	if (IS_ERR(atmel_port->gpios)) {
		ret = PTR_ERR(atmel_port->gpios);
		goto err_clear_bit;
	}

	/* interrupt-driven RX buffers characters in a ring; PDC RX does not */
	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
		ret = -ENOMEM;
		data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
				     sizeof(struct atmel_uart_char),
				     GFP_KERNEL);
		if (!data)
			goto err_alloc_ring;
		atmel_port->rx_ring.buf = data;
	}

	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;

	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
	if (ret)
		goto err_add_port;

#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
	if (atmel_is_console_port(&atmel_port->uart)
			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
		/*
		 * The serial core enabled the clock for us, so undo
		 * the clk_prepare_enable() in atmel_console_setup()
		 */
		clk_disable_unprepare(atmel_port->clk);
	}
#endif

	device_init_wakeup(&pdev->dev, 1);
	platform_set_drvdata(pdev, atmel_port);

	/*
	 * The peripheral clock has been disabled by atmel_init_port():
	 * enable it before accessing I/O registers
	 */
	clk_prepare_enable(atmel_port->clk);

	if (rs485_enabled) {
		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
				  ATMEL_US_USMODE_NORMAL);
		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
				  ATMEL_US_RTSEN);
	}

	/*
	 * Get port name of usart or uart
	 */
	atmel_get_ip_name(&atmel_port->uart);

	/*
	 * The peripheral clock can now safely be disabled till the port
	 * is used
	 */
	clk_disable_unprepare(atmel_port->clk);

	return 0;

err_add_port:
	kfree(atmel_port->rx_ring.buf);
	atmel_port->rx_ring.buf = NULL;
err_alloc_ring:
	/* the console keeps its clock reference until atmel_serial_remove() */
	if (!atmel_is_console_port(&atmel_port->uart)) {
		clk_put(atmel_port->clk);
		atmel_port->clk = NULL;
	}
err_clear_bit:
	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
err:
	return ret;
}
2808 
2809 /*
2810  * Even if the driver is not modular, it makes sense to be able to
2811  * unbind a device: there can be many bound devices, and there are
2812  * situations where dynamic binding and unbinding can be useful.
2813  *
2814  * For example, a connected device can require a specific firmware update
2815  * protocol that needs bitbanging on IO lines, but use the regular serial
2816  * port in the normal case.
2817  */
2818 static int atmel_serial_remove(struct platform_device *pdev)
2819 {
2820 	struct uart_port *port = platform_get_drvdata(pdev);
2821 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2822 	int ret = 0;
2823 
2824 	tasklet_kill(&atmel_port->tasklet_rx);
2825 	tasklet_kill(&atmel_port->tasklet_tx);
2826 
2827 	device_init_wakeup(&pdev->dev, 0);
2828 
2829 	ret = uart_remove_one_port(&atmel_uart, port);
2830 
2831 	kfree(atmel_port->rx_ring.buf);
2832 
2833 	/* "port" is allocated statically, so we shouldn't free it */
2834 
2835 	clear_bit(port->line, atmel_ports_in_use);
2836 
2837 	clk_put(atmel_port->clk);
2838 	atmel_port->clk = NULL;
2839 
2840 	return ret;
2841 }
2842 
/* platform glue: binds "atmel_usart" devices (or DT matches) to this driver */
static struct platform_driver atmel_serial_driver = {
	.probe		= atmel_serial_probe,
	.remove		= atmel_serial_remove,
	.suspend	= atmel_serial_suspend,
	.resume		= atmel_serial_resume,
	.driver		= {
		.name			= "atmel_usart",
		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
	},
};
2853 
2854 static int __init atmel_serial_init(void)
2855 {
2856 	int ret;
2857 
2858 	ret = uart_register_driver(&atmel_uart);
2859 	if (ret)
2860 		return ret;
2861 
2862 	ret = platform_driver_register(&atmel_serial_driver);
2863 	if (ret)
2864 		uart_unregister_driver(&atmel_uart);
2865 
2866 	return ret;
2867 }
2868 device_initcall(atmel_serial_init);
2869