1 /*
2  *  Driver for Atmel AT91 / AT32 Serial ports
3  *  Copyright (C) 2003 Rick Bronson
4  *
5  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
6  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
7  *
8  *  DMA support added by Chip Coldwell.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  */
25 #include <linux/tty.h>
26 #include <linux/ioport.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/serial.h>
30 #include <linux/clk.h>
31 #include <linux/console.h>
32 #include <linux/sysrq.h>
33 #include <linux/tty_flip.h>
34 #include <linux/platform_device.h>
35 #include <linux/of.h>
36 #include <linux/of_device.h>
37 #include <linux/of_gpio.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/dmaengine.h>
40 #include <linux/atmel_pdc.h>
41 #include <linux/uaccess.h>
42 #include <linux/platform_data/atmel.h>
43 #include <linux/timer.h>
44 #include <linux/gpio.h>
45 #include <linux/gpio/consumer.h>
46 #include <linux/err.h>
47 #include <linux/irq.h>
48 #include <linux/suspend.h>
49 
50 #include <asm/io.h>
51 #include <asm/ioctls.h>
52 
53 #define PDC_BUFFER_SIZE		512
54 /* Revisit: We should calculate this based on the actual port settings */
55 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
56 
57 /* The minimum number of data the FIFOs should be able to contain */
58 #define ATMEL_MIN_FIFO_SIZE	8
59 /*
60  * These two offsets are subtracted from the RX FIFO size to define the RTS
61  * high and low thresholds
62  */
63 #define ATMEL_RTS_HIGH_OFFSET	16
64 #define ATMEL_RTS_LOW_OFFSET	20
65 
66 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
67 #define SUPPORT_SYSRQ
68 #endif
69 
70 #include <linux/serial_core.h>
71 
72 #include "serial_mctrl_gpio.h"
73 #include "atmel_serial.h"
74 
75 static void atmel_start_rx(struct uart_port *port);
76 static void atmel_stop_rx(struct uart_port *port);
77 
78 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
79 
80 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
81  * are to coexist with the 8250 driver, for example when an external 16C550
82  * UART is present. */
83 #define SERIAL_ATMEL_MAJOR	204
84 #define MINOR_START		154
85 #define ATMEL_DEVICENAME	"ttyAT"
86 
87 #else
88 
89 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
90  * name, but it is legally reserved for the 8250 driver. */
91 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
92 #define MINOR_START		64
93 #define ATMEL_DEVICENAME	"ttyS"
94 
95 #endif
96 
97 #define ATMEL_ISR_PASS_LIMIT	256
98 
99 struct atmel_dma_buffer {
100 	unsigned char	*buf;
101 	dma_addr_t	dma_addr;
102 	unsigned int	dma_size;
103 	unsigned int	ofs;
104 };
105 
106 struct atmel_uart_char {
107 	u16		status;
108 	u16		ch;
109 };
110 
111 /*
112  * Be careful, the real size of the ring buffer is
113  * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. This means the ring buffer
114  * can contain up to 1024 characters in PIO mode and up to 4096 characters in
115  * DMA mode.
116  */
117 #define ATMEL_SERIAL_RINGSIZE 1024
118 
119 /*
120  * at91: 6 USARTs and one DBGU port (SAM9260)
121  * avr32: 4
122  * samx7: 3 USARTs and 5 UARTs
123  */
124 #define ATMEL_MAX_UART		8
125 
126 /*
127  * We wrap our port structure around the generic uart_port.
128  */
129 struct atmel_uart_port {
130 	struct uart_port	uart;		/* uart */
131 	struct clk		*clk;		/* uart clock */
132 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
133 	u32			backup_imr;	/* IMR saved during suspend */
134 	int			break_active;	/* break being received */
135 
136 	bool			use_dma_rx;	/* enable DMA receiver */
137 	bool			use_pdc_rx;	/* enable PDC receiver */
138 	short			pdc_rx_idx;	/* current PDC RX buffer */
139 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
140 
141 	bool			use_dma_tx;     /* enable DMA transmitter */
142 	bool			use_pdc_tx;	/* enable PDC transmitter */
143 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
144 
145 	spinlock_t			lock_tx;	/* tx dma lock */
146 	spinlock_t			lock_rx;	/* rx dma lock */
147 	struct dma_chan			*chan_tx;
148 	struct dma_chan			*chan_rx;
149 	struct dma_async_tx_descriptor	*desc_tx;
150 	struct dma_async_tx_descriptor	*desc_rx;
151 	dma_cookie_t			cookie_tx;
152 	dma_cookie_t			cookie_rx;
153 	struct scatterlist		sg_tx;
154 	struct scatterlist		sg_rx;
155 	struct tasklet_struct	tasklet_rx;
156 	struct tasklet_struct	tasklet_tx;
157 	atomic_t		tasklet_shutdown;
158 	unsigned int		irq_status_prev;
159 	unsigned int		tx_len;
160 
161 	struct circ_buf		rx_ring;
162 
163 	struct mctrl_gpios	*gpios;
164 	unsigned int		tx_done_mask;
165 	u32			fifo_size;
166 	u32			rts_high;
167 	u32			rts_low;
168 	bool			ms_irq_enabled;
169 	u32			rtor;	/* address of receiver timeout register if it exists */
170 	bool			has_frac_baudrate;
171 	bool			has_hw_timer;
172 	struct timer_list	uart_timer;
173 
174 	bool			suspended;
175 	unsigned int		pending;
176 	unsigned int		pending_status;
177 	spinlock_t		lock_suspended;
178 
179 #ifdef CONFIG_PM
180 	struct {
181 		u32		cr;
182 		u32		mr;
183 		u32		imr;
184 		u32		brgr;
185 		u32		rtor;
186 		u32		ttgr;
187 		u32		fmr;
188 		u32		fimr;
189 	} cache;
190 #endif
191 
192 	int (*prepare_rx)(struct uart_port *port);
193 	int (*prepare_tx)(struct uart_port *port);
194 	void (*schedule_rx)(struct uart_port *port);
195 	void (*schedule_tx)(struct uart_port *port);
196 	void (*release_rx)(struct uart_port *port);
197 	void (*release_tx)(struct uart_port *port);
198 };
199 
200 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
201 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
202 
203 #ifdef SUPPORT_SYSRQ
204 static struct console atmel_console;
205 #endif
206 
207 #if defined(CONFIG_OF)
208 static const struct of_device_id atmel_serial_dt_ids[] = {
209 	{ .compatible = "atmel,at91rm9200-usart" },
210 	{ .compatible = "atmel,at91sam9260-usart" },
211 	{ /* sentinel */ }
212 };
213 #endif
214 
215 static inline struct atmel_uart_port *
216 to_atmel_uart_port(struct uart_port *uart)
217 {
218 	return container_of(uart, struct atmel_uart_port, uart);
219 }
220 
221 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
222 {
223 	return __raw_readl(port->membase + reg);
224 }
225 
226 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
227 {
228 	__raw_writel(value, port->membase + reg);
229 }
230 
231 #ifdef CONFIG_AVR32
232 
233 /* AVR32 cannot handle 8-bit or 16-bit I/O accesses, only 32-bit accesses */
234 static inline u8 atmel_uart_read_char(struct uart_port *port)
235 {
236 	return __raw_readl(port->membase + ATMEL_US_RHR);
237 }
238 
239 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
240 {
241 	__raw_writel(value, port->membase + ATMEL_US_THR);
242 }
243 
244 #else
245 
246 static inline u8 atmel_uart_read_char(struct uart_port *port)
247 {
248 	return __raw_readb(port->membase + ATMEL_US_RHR);
249 }
250 
251 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
252 {
253 	__raw_writeb(value, port->membase + ATMEL_US_THR);
254 }
255 
256 #endif
257 
258 #ifdef CONFIG_SERIAL_ATMEL_PDC
259 static bool atmel_use_pdc_rx(struct uart_port *port)
260 {
261 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
262 
263 	return atmel_port->use_pdc_rx;
264 }
265 
266 static bool atmel_use_pdc_tx(struct uart_port *port)
267 {
268 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
269 
270 	return atmel_port->use_pdc_tx;
271 }
272 #else
273 static bool atmel_use_pdc_rx(struct uart_port *port)
274 {
275 	return false;
276 }
277 
278 static bool atmel_use_pdc_tx(struct uart_port *port)
279 {
280 	return false;
281 }
282 #endif
283 
284 static bool atmel_use_dma_tx(struct uart_port *port)
285 {
286 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
287 
288 	return atmel_port->use_dma_tx;
289 }
290 
291 static bool atmel_use_dma_rx(struct uart_port *port)
292 {
293 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
294 
295 	return atmel_port->use_dma_rx;
296 }
297 
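/*
 * A non-zero fifo_size means the USART embeds TX/RX FIFOs; see the FIFO
 * setup in atmel_startup() for how the thresholds are programmed.
 */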
298 static bool atmel_use_fifo(struct uart_port *port)
299 {
300 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
301 
302 	return atmel_port->fifo_size;
303 }
304 
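/*
 * Schedule a tasklet only while tasklet_shutdown is clear, so that no new
 * work is queued once the port starts shutting down.
 */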
305 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
306 				   struct tasklet_struct *t)
307 {
308 	if (!atomic_read(&atmel_port->tasklet_shutdown))
309 		tasklet_schedule(t);
310 }
311 
312 static unsigned int atmel_get_lines_status(struct uart_port *port)
313 {
314 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
315 	unsigned int status, ret = 0;
316 
317 	status = atmel_uart_readl(port, ATMEL_US_CSR);
318 
319 	mctrl_gpio_get(atmel_port->gpios, &ret);
320 
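	/*
	 * For each modem-status line backed by a GPIO, override the CSR bit
	 * with the GPIO state. The CSR bits are active low, so the bit is
	 * cleared when the corresponding TIOCM flag is asserted.
	 */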
321 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
322 						UART_GPIO_CTS))) {
323 		if (ret & TIOCM_CTS)
324 			status &= ~ATMEL_US_CTS;
325 		else
326 			status |= ATMEL_US_CTS;
327 	}
328 
329 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
330 						UART_GPIO_DSR))) {
331 		if (ret & TIOCM_DSR)
332 			status &= ~ATMEL_US_DSR;
333 		else
334 			status |= ATMEL_US_DSR;
335 	}
336 
337 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
338 						UART_GPIO_RI))) {
339 		if (ret & TIOCM_RI)
340 			status &= ~ATMEL_US_RI;
341 		else
342 			status |= ATMEL_US_RI;
343 	}
344 
345 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
346 						UART_GPIO_DCD))) {
347 		if (ret & TIOCM_CD)
348 			status &= ~ATMEL_US_DCD;
349 		else
350 			status |= ATMEL_US_DCD;
351 	}
352 
353 	return status;
354 }
355 
356 /* Enable or disable the rs485 support */
357 static int atmel_config_rs485(struct uart_port *port,
358 			      struct serial_rs485 *rs485conf)
359 {
360 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
361 	unsigned int mode;
362 
363 	/* Disable interrupts */
364 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
365 
366 	mode = atmel_uart_readl(port, ATMEL_US_MR);
367 
368 	/* Resetting serial mode to RS232 (0x0) */
369 	mode &= ~ATMEL_US_USMODE;
370 
371 	port->rs485 = *rs485conf;
372 
373 	if (rs485conf->flags & SER_RS485_ENABLED) {
374 		dev_dbg(port->dev, "Setting UART to RS485\n");
375 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
376 		atmel_uart_writel(port, ATMEL_US_TTGR,
377 				  rs485conf->delay_rts_after_send);
378 		mode |= ATMEL_US_USMODE_RS485;
379 	} else {
380 		dev_dbg(port->dev, "Setting UART to RS232\n");
381 		if (atmel_use_pdc_tx(port))
382 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
383 				ATMEL_US_TXBUFE;
384 		else
385 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
386 	}
387 	atmel_uart_writel(port, ATMEL_US_MR, mode);
388 
389 	/* Enable interrupts */
390 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
391 
392 	return 0;
393 }
394 
395 /*
396  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
397  */
398 static u_int atmel_tx_empty(struct uart_port *port)
399 {
400 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
401 		TIOCSER_TEMT :
402 		0;
403 }
404 
405 /*
406  * Set state of the modem control output lines
407  */
408 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
409 {
410 	unsigned int control = 0;
411 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
412 	unsigned int rts_paused, rts_ready;
413 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
414 
415 	/* override mode to RS485 if needed, otherwise keep the current mode */
416 	if (port->rs485.flags & SER_RS485_ENABLED) {
417 		atmel_uart_writel(port, ATMEL_US_TTGR,
418 				  port->rs485.delay_rts_after_send);
419 		mode &= ~ATMEL_US_USMODE;
420 		mode |= ATMEL_US_USMODE_RS485;
421 	}
422 
423 	/* set the RTS line state according to the mode */
424 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
425 		/* force RTS line to high level */
426 		rts_paused = ATMEL_US_RTSEN;
427 
428 		/* give the control of the RTS line back to the hardware */
429 		rts_ready = ATMEL_US_RTSDIS;
430 	} else {
431 		/* force RTS line to high level */
432 		rts_paused = ATMEL_US_RTSDIS;
433 
434 		/* force RTS line to low level */
435 		rts_ready = ATMEL_US_RTSEN;
436 	}
437 
438 	if (mctrl & TIOCM_RTS)
439 		control |= rts_ready;
440 	else
441 		control |= rts_paused;
442 
443 	if (mctrl & TIOCM_DTR)
444 		control |= ATMEL_US_DTREN;
445 	else
446 		control |= ATMEL_US_DTRDIS;
447 
448 	atmel_uart_writel(port, ATMEL_US_CR, control);
449 
450 	mctrl_gpio_set(atmel_port->gpios, mctrl);
451 
452 	/* Local loopback mode? */
453 	mode &= ~ATMEL_US_CHMODE;
454 	if (mctrl & TIOCM_LOOP)
455 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
456 	else
457 		mode |= ATMEL_US_CHMODE_NORMAL;
458 
459 	atmel_uart_writel(port, ATMEL_US_MR, mode);
460 }
461 
462 /*
463  * Get state of the modem control input lines
464  */
465 static u_int atmel_get_mctrl(struct uart_port *port)
466 {
467 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
468 	unsigned int ret = 0, status;
469 
470 	status = atmel_uart_readl(port, ATMEL_US_CSR);
471 
472 	/*
473 	 * The control signals are active low.
474 	 */
475 	if (!(status & ATMEL_US_DCD))
476 		ret |= TIOCM_CD;
477 	if (!(status & ATMEL_US_CTS))
478 		ret |= TIOCM_CTS;
479 	if (!(status & ATMEL_US_DSR))
480 		ret |= TIOCM_DSR;
481 	if (!(status & ATMEL_US_RI))
482 		ret |= TIOCM_RI;
483 
484 	return mctrl_gpio_get(atmel_port->gpios, &ret);
485 }
486 
487 /*
488  * Stop transmitting.
489  */
490 static void atmel_stop_tx(struct uart_port *port)
491 {
492 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
493 
494 	if (atmel_use_pdc_tx(port)) {
495 		/* disable PDC transmit */
496 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
497 	}
498 
499 	/*
500 	 * Disable the transmitter.
501 	 * This is mandatory when DMA is used, otherwise stopping TX here would
502 	 * not prevent the rest of the DMA buffer from being transmitted.
503 	 */
504 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
505 
506 	/* Disable interrupts */
507 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
508 
509 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
510 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
511 		atmel_start_rx(port);
512 }
513 
514 /*
515  * Start transmitting.
516  */
517 static void atmel_start_tx(struct uart_port *port)
518 {
519 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
520 
521 	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
522 				       & ATMEL_PDC_TXTEN))
523 		/* The transmitter is already running.  Yes, we
524 		 * really need this. */
525 		return;
526 
527 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
528 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
529 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
530 			atmel_stop_rx(port);
531 
532 	if (atmel_use_pdc_tx(port))
533 		/* re-enable PDC transmit */
534 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
535 
536 	/* Enable interrupts */
537 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
538 
539 	/* re-enable the transmitter */
540 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
541 }
542 
543 /*
544  * Start receiving - port is in process of being opened.
545  */
546 static void atmel_start_rx(struct uart_port *port)
547 {
548 	/* reset status and receiver */
549 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
550 
551 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
552 
553 	if (atmel_use_pdc_rx(port)) {
554 		/* enable PDC controller */
555 		atmel_uart_writel(port, ATMEL_US_IER,
556 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
557 				  port->read_status_mask);
558 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
559 	} else {
560 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
561 	}
562 }
563 
564 /*
565  * Stop receiving - port is in process of being closed.
566  */
567 static void atmel_stop_rx(struct uart_port *port)
568 {
569 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
570 
571 	if (atmel_use_pdc_rx(port)) {
572 		/* disable PDC receive */
573 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
574 		atmel_uart_writel(port, ATMEL_US_IDR,
575 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
576 				  port->read_status_mask);
577 	} else {
578 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
579 	}
580 }
581 
582 /*
583  * Enable modem status interrupts
584  */
585 static void atmel_enable_ms(struct uart_port *port)
586 {
587 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
588 	uint32_t ier = 0;
589 
590 	/*
591 	 * Interrupt should not be enabled twice
592 	 */
593 	if (atmel_port->ms_irq_enabled)
594 		return;
595 
596 	atmel_port->ms_irq_enabled = true;
597 
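	/*
	 * Only enable the USART's own interrupt for lines that have no
	 * dedicated GPIO; GPIO-backed lines get their interrupts through
	 * mctrl_gpio_enable_ms() below.
	 */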
598 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
599 		ier |= ATMEL_US_CTSIC;
600 
601 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
602 		ier |= ATMEL_US_DSRIC;
603 
604 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
605 		ier |= ATMEL_US_RIIC;
606 
607 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
608 		ier |= ATMEL_US_DCDIC;
609 
610 	atmel_uart_writel(port, ATMEL_US_IER, ier);
611 
612 	mctrl_gpio_enable_ms(atmel_port->gpios);
613 }
614 
615 /*
616  * Disable modem status interrupts
617  */
618 static void atmel_disable_ms(struct uart_port *port)
619 {
620 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
621 	uint32_t idr = 0;
622 
623 	/*
624 	 * Interrupt should not be disabled twice
625 	 */
626 	if (!atmel_port->ms_irq_enabled)
627 		return;
628 
629 	atmel_port->ms_irq_enabled = false;
630 
631 	mctrl_gpio_disable_ms(atmel_port->gpios);
632 
633 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
634 		idr |= ATMEL_US_CTSIC;
635 
636 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
637 		idr |= ATMEL_US_DSRIC;
638 
639 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
640 		idr |= ATMEL_US_RIIC;
641 
642 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
643 		idr |= ATMEL_US_DCDIC;
644 
645 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
646 }
647 
648 /*
649  * Control the transmission of a break signal
650  */
651 static void atmel_break_ctl(struct uart_port *port, int break_state)
652 {
653 	if (break_state != 0)
654 		/* start break */
655 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
656 	else
657 		/* stop break */
658 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
659 }
660 
661 /*
662  * Stores the incoming character in the ring buffer
663  */
664 static void
665 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
666 		     unsigned int ch)
667 {
668 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
669 	struct circ_buf *ring = &atmel_port->rx_ring;
670 	struct atmel_uart_char *c;
671 
672 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
673 		/* Buffer overflow, ignore char */
674 		return;
675 
676 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
677 	c->status	= status;
678 	c->ch		= ch;
679 
680 	/* Make sure the character is stored before we update head. */
681 	smp_wmb();
682 
683 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
684 }
685 
686 /*
687  * Deal with parity, framing and overrun errors.
688  */
689 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
690 {
691 	/* clear error */
692 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
693 
694 	if (status & ATMEL_US_RXBRK) {
695 		/* ignore side-effect */
696 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
697 		port->icount.brk++;
698 	}
699 	if (status & ATMEL_US_PARE)
700 		port->icount.parity++;
701 	if (status & ATMEL_US_FRAME)
702 		port->icount.frame++;
703 	if (status & ATMEL_US_OVRE)
704 		port->icount.overrun++;
705 }
706 
707 /*
708  * Characters received (called from interrupt handler)
709  */
710 static void atmel_rx_chars(struct uart_port *port)
711 {
712 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
713 	unsigned int status, ch;
714 
715 	status = atmel_uart_readl(port, ATMEL_US_CSR);
716 	while (status & ATMEL_US_RXRDY) {
717 		ch = atmel_uart_read_char(port);
718 
719 		/*
720 		 * note that the error handling code is
721 		 * out of the main execution path
722 		 */
723 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
724 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
725 			     || atmel_port->break_active)) {
726 
727 			/* clear error */
728 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
729 
730 			if (status & ATMEL_US_RXBRK
731 			    && !atmel_port->break_active) {
732 				atmel_port->break_active = 1;
733 				atmel_uart_writel(port, ATMEL_US_IER,
734 						  ATMEL_US_RXBRK);
735 			} else {
736 				/*
737 				 * This is either the end-of-break
738 				 * condition or we've received at
739 				 * least one character without RXBRK
740 				 * being set. In both cases, the next
741 				 * RXBRK will indicate start-of-break.
742 				 */
743 				atmel_uart_writel(port, ATMEL_US_IDR,
744 						  ATMEL_US_RXBRK);
745 				status &= ~ATMEL_US_RXBRK;
746 				atmel_port->break_active = 0;
747 			}
748 		}
749 
750 		atmel_buffer_rx_char(port, status, ch);
751 		status = atmel_uart_readl(port, ATMEL_US_CSR);
752 	}
753 
754 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
755 }
756 
757 /*
758  * Transmit characters (called from tasklet with TXRDY interrupt
759  * disabled)
760  */
761 static void atmel_tx_chars(struct uart_port *port)
762 {
763 	struct circ_buf *xmit = &port->state->xmit;
764 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
765 
766 	if (port->x_char &&
767 	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
768 		atmel_uart_write_char(port, port->x_char);
769 		port->icount.tx++;
770 		port->x_char = 0;
771 	}
772 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
773 		return;
774 
775 	while (atmel_uart_readl(port, ATMEL_US_CSR) &
776 	       atmel_port->tx_done_mask) {
777 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
778 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
779 		port->icount.tx++;
780 		if (uart_circ_empty(xmit))
781 			break;
782 	}
783 
784 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
785 		uart_write_wakeup(port);
786 
787 	if (!uart_circ_empty(xmit))
788 		/* Enable interrupts */
789 		atmel_uart_writel(port, ATMEL_US_IER,
790 				  atmel_port->tx_done_mask);
791 }
792 
793 static void atmel_complete_tx_dma(void *arg)
794 {
795 	struct atmel_uart_port *atmel_port = arg;
796 	struct uart_port *port = &atmel_port->uart;
797 	struct circ_buf *xmit = &port->state->xmit;
798 	struct dma_chan *chan = atmel_port->chan_tx;
799 	unsigned long flags;
800 
801 	spin_lock_irqsave(&port->lock, flags);
802 
803 	if (chan)
804 		dmaengine_terminate_all(chan);
805 	xmit->tail += atmel_port->tx_len;
806 	xmit->tail &= UART_XMIT_SIZE - 1;
807 
808 	port->icount.tx += atmel_port->tx_len;
809 
810 	spin_lock_irq(&atmel_port->lock_tx);
811 	async_tx_ack(atmel_port->desc_tx);
812 	atmel_port->cookie_tx = -EINVAL;
813 	atmel_port->desc_tx = NULL;
814 	spin_unlock_irq(&atmel_port->lock_tx);
815 
816 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
817 		uart_write_wakeup(port);
818 
819 	/*
820 	 * xmit is a circular buffer: if we have just sent data from
821 	 * xmit->tail to the end of xmit->buf, we now have to transmit the
822 	 * remaining data from the beginning of xmit->buf to xmit->head.
823 	 */
824 	if (!uart_circ_empty(xmit))
825 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
826 	else if ((port->rs485.flags & SER_RS485_ENABLED) &&
827 		 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
828 		/* DMA done, stop TX, start RX for RS485 */
829 		atmel_start_rx(port);
830 	}
831 
832 	spin_unlock_irqrestore(&port->lock, flags);
833 }
834 
835 static void atmel_release_tx_dma(struct uart_port *port)
836 {
837 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
838 	struct dma_chan *chan = atmel_port->chan_tx;
839 
840 	if (chan) {
841 		dmaengine_terminate_all(chan);
842 		dma_release_channel(chan);
843 		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
844 				DMA_TO_DEVICE);
845 	}
846 
847 	atmel_port->desc_tx = NULL;
848 	atmel_port->chan_tx = NULL;
849 	atmel_port->cookie_tx = -EINVAL;
850 }
851 
852 /*
853  * Called from the tasklet with the TXRDY interrupt disabled.
854  */
855 static void atmel_tx_dma(struct uart_port *port)
856 {
857 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
858 	struct circ_buf *xmit = &port->state->xmit;
859 	struct dma_chan *chan = atmel_port->chan_tx;
860 	struct dma_async_tx_descriptor *desc;
861 	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
862 	unsigned int tx_len, part1_len, part2_len, sg_len;
863 	dma_addr_t phys_addr;
864 
865 	/* Make sure we have an idle channel */
866 	if (atmel_port->desc_tx != NULL)
867 		return;
868 
869 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
870 		/*
871 		 * DMA is idle now.
872 		 * Port xmit buffer is already mapped,
873 		 * and it is one page... Just adjust
874 		 * offsets and lengths. Since it is a circular buffer,
875 		 * we have to transmit till the end, and then the rest.
876 		 * Take the port lock to get a
877 		 * consistent xmit buffer state.
878 		 */
879 		tx_len = CIRC_CNT_TO_END(xmit->head,
880 					 xmit->tail,
881 					 UART_XMIT_SIZE);
882 
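		/*
		 * With FIFOs, the THR accepts multi-data writes, so the bulk
		 * of the buffer is sent with 4-byte accesses and only the
		 * remainder byte by byte (matching dst_addr_width in
		 * atmel_prepare_tx_dma()).
		 */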
883 		if (atmel_port->fifo_size) {
884 			/* multi data mode */
885 			part1_len = (tx_len & ~0x3); /* DWORD access */
886 			part2_len = (tx_len & 0x3); /* BYTE access */
887 		} else {
888 			/* single data (legacy) mode */
889 			part1_len = 0;
890 			part2_len = tx_len; /* BYTE access only */
891 		}
892 
893 		sg_init_table(sgl, 2);
894 		sg_len = 0;
895 		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
896 		if (part1_len) {
897 			sg = &sgl[sg_len++];
898 			sg_dma_address(sg) = phys_addr;
899 			sg_dma_len(sg) = part1_len;
900 
901 			phys_addr += part1_len;
902 		}
903 
904 		if (part2_len) {
905 			sg = &sgl[sg_len++];
906 			sg_dma_address(sg) = phys_addr;
907 			sg_dma_len(sg) = part2_len;
908 		}
909 
910 		/*
911 		 * save tx_len so atmel_complete_tx_dma() will increase
912 		 * xmit->tail correctly
913 		 */
914 		atmel_port->tx_len = tx_len;
915 
916 		desc = dmaengine_prep_slave_sg(chan,
917 					       sgl,
918 					       sg_len,
919 					       DMA_MEM_TO_DEV,
920 					       DMA_PREP_INTERRUPT |
921 					       DMA_CTRL_ACK);
922 		if (!desc) {
923 			dev_err(port->dev, "Failed to send via dma!\n");
924 			return;
925 		}
926 
927 		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
928 
929 		atmel_port->desc_tx = desc;
930 		desc->callback = atmel_complete_tx_dma;
931 		desc->callback_param = atmel_port;
932 		atmel_port->cookie_tx = dmaengine_submit(desc);
933 	}
934 
935 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
936 		uart_write_wakeup(port);
937 }
938 
939 static int atmel_prepare_tx_dma(struct uart_port *port)
940 {
941 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
942 	dma_cap_mask_t		mask;
943 	struct dma_slave_config config;
944 	int ret, nent;
945 
946 	dma_cap_zero(mask);
947 	dma_cap_set(DMA_SLAVE, mask);
948 
949 	atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
950 	if (atmel_port->chan_tx == NULL)
951 		goto chan_err;
952 	dev_info(port->dev, "using %s for tx DMA transfers\n",
953 		dma_chan_name(atmel_port->chan_tx));
954 
955 	spin_lock_init(&atmel_port->lock_tx);
956 	sg_init_table(&atmel_port->sg_tx, 1);
957 	/* UART circular tx buffer is an aligned page. */
958 	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
959 	sg_set_page(&atmel_port->sg_tx,
960 			virt_to_page(port->state->xmit.buf),
961 			UART_XMIT_SIZE,
962 			(unsigned long)port->state->xmit.buf & ~PAGE_MASK);
963 	nent = dma_map_sg(port->dev,
964 				&atmel_port->sg_tx,
965 				1,
966 				DMA_TO_DEVICE);
967 
968 	if (!nent) {
969 		dev_dbg(port->dev, "need to release resource of dma\n");
970 		goto chan_err;
971 	} else {
972 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
973 			sg_dma_len(&atmel_port->sg_tx),
974 			port->state->xmit.buf,
975 			&sg_dma_address(&atmel_port->sg_tx));
976 	}
977 
978 	/* Configure the slave DMA */
979 	memset(&config, 0, sizeof(config));
980 	config.direction = DMA_MEM_TO_DEV;
981 	config.dst_addr_width = (atmel_port->fifo_size) ?
982 				DMA_SLAVE_BUSWIDTH_4_BYTES :
983 				DMA_SLAVE_BUSWIDTH_1_BYTE;
984 	config.dst_addr = port->mapbase + ATMEL_US_THR;
985 	config.dst_maxburst = 1;
986 
987 	ret = dmaengine_slave_config(atmel_port->chan_tx,
988 				     &config);
989 	if (ret) {
990 		dev_err(port->dev, "DMA tx slave configuration failed\n");
991 		goto chan_err;
992 	}
993 
994 	return 0;
995 
996 chan_err:
997 	dev_err(port->dev, "TX channel not available, switch to pio\n");
998 	atmel_port->use_dma_tx = 0;
999 	if (atmel_port->chan_tx)
1000 		atmel_release_tx_dma(port);
1001 	return -EINVAL;
1002 }
1003 
1004 static void atmel_complete_rx_dma(void *arg)
1005 {
1006 	struct uart_port *port = arg;
1007 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1008 
1009 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1010 }
1011 
1012 static void atmel_release_rx_dma(struct uart_port *port)
1013 {
1014 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1015 	struct dma_chan *chan = atmel_port->chan_rx;
1016 
1017 	if (chan) {
1018 		dmaengine_terminate_all(chan);
1019 		dma_release_channel(chan);
1020 		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
1021 				DMA_FROM_DEVICE);
1022 	}
1023 
1024 	atmel_port->desc_rx = NULL;
1025 	atmel_port->chan_rx = NULL;
1026 	atmel_port->cookie_rx = -EINVAL;
1027 }
1028 
1029 static void atmel_rx_from_dma(struct uart_port *port)
1030 {
1031 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1032 	struct tty_port *tport = &port->state->port;
1033 	struct circ_buf *ring = &atmel_port->rx_ring;
1034 	struct dma_chan *chan = atmel_port->chan_rx;
1035 	struct dma_tx_state state;
1036 	enum dma_status dmastat;
1037 	size_t count;
1038 
1039 
1040 	/* Reset the UART timeout early so that we don't miss one */
1041 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1042 	dmastat = dmaengine_tx_status(chan,
1043 				atmel_port->cookie_rx,
1044 				&state);
1045 	/* Reschedule the tasklet if the DMA status reports an error */
1046 	if (dmastat == DMA_ERROR) {
1047 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1048 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1049 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1050 		return;
1051 	}
1052 
1053 	/* CPU claims ownership of RX DMA buffer */
1054 	dma_sync_sg_for_cpu(port->dev,
1055 			    &atmel_port->sg_rx,
1056 			    1,
1057 			    DMA_FROM_DEVICE);
1058 
1059 	/*
1060 	 * ring->head points to the end of data already written by the DMA.
1061 	 * ring->tail points to the beginning of data to be read by the
1062 	 * framework.
1063 	 * The current transfer size should not be larger than the dma buffer
1064 	 * length.
1065 	 */
1066 	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1067 	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1068 	/*
1069 	 * At this point ring->head may point to the first byte right after the
1070 	 * last byte of the dma buffer:
1071 	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1072 	 *
1073 	 * However ring->tail must always point inside the dma buffer:
1074 	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1075 	 *
1076 	 * Since we use a ring buffer, we have to handle the case
1077 	 * where head is lower than tail. In such a case, we first read from
1078 	 * tail to the end of the buffer then reset tail.
1079 	 */
1080 	if (ring->head < ring->tail) {
1081 		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1082 
1083 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1084 		ring->tail = 0;
1085 		port->icount.rx += count;
1086 	}
1087 
1088 	/* Finally we read data from tail to head */
1089 	if (ring->tail < ring->head) {
1090 		count = ring->head - ring->tail;
1091 
1092 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1093 		/* Wrap ring->head if needed */
1094 		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1095 			ring->head = 0;
1096 		ring->tail = ring->head;
1097 		port->icount.rx += count;
1098 	}
1099 
1100 	/* USART retakes ownership of the RX DMA buffer */
1101 	dma_sync_sg_for_device(port->dev,
1102 			       &atmel_port->sg_rx,
1103 			       1,
1104 			       DMA_FROM_DEVICE);
1105 
1106 	/*
1107 	 * Drop the lock here since it might end up calling
1108 	 * uart_start(), which takes the lock.
1109 	 */
1110 	spin_unlock(&port->lock);
1111 	tty_flip_buffer_push(tport);
1112 	spin_lock(&port->lock);
1113 
1114 	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1115 }
1116 
1117 static int atmel_prepare_rx_dma(struct uart_port *port)
1118 {
1119 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1120 	struct dma_async_tx_descriptor *desc;
1121 	dma_cap_mask_t		mask;
1122 	struct dma_slave_config config;
1123 	struct circ_buf		*ring;
1124 	int ret, nent;
1125 
1126 	ring = &atmel_port->rx_ring;
1127 
1128 	dma_cap_zero(mask);
1129 	dma_cap_set(DMA_CYCLIC, mask);
1130 
1131 	atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
1132 	if (atmel_port->chan_rx == NULL)
1133 		goto chan_err;
1134 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1135 		dma_chan_name(atmel_port->chan_rx));
1136 
1137 	spin_lock_init(&atmel_port->lock_rx);
1138 	sg_init_table(&atmel_port->sg_rx, 1);
1139 	/* UART circular rx buffer is an aligned page. */
1140 	BUG_ON(!PAGE_ALIGNED(ring->buf));
1141 	sg_set_page(&atmel_port->sg_rx,
1142 		    virt_to_page(ring->buf),
1143 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1144 		    (unsigned long)ring->buf & ~PAGE_MASK);
1145 	nent = dma_map_sg(port->dev,
1146 			  &atmel_port->sg_rx,
1147 			  1,
1148 			  DMA_FROM_DEVICE);
1149 
1150 	if (!nent) {
1151 		dev_dbg(port->dev, "need to release resource of dma\n");
1152 		goto chan_err;
1153 	} else {
1154 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1155 			sg_dma_len(&atmel_port->sg_rx),
1156 			ring->buf,
1157 			&sg_dma_address(&atmel_port->sg_rx));
1158 	}
1159 
1160 	/* Configure the slave DMA */
1161 	memset(&config, 0, sizeof(config));
1162 	config.direction = DMA_DEV_TO_MEM;
1163 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1164 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1165 	config.src_maxburst = 1;
1166 
1167 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1168 				     &config);
1169 	if (ret) {
1170 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1171 		goto chan_err;
1172 	}
1173 	/*
1174 	 * Prepare a cyclic dma transfer, assigning 2 descriptors,
1175 	 * each one half the ring buffer size
1176 	 */
1177 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1178 					 sg_dma_address(&atmel_port->sg_rx),
1179 					 sg_dma_len(&atmel_port->sg_rx),
1180 					 sg_dma_len(&atmel_port->sg_rx)/2,
1181 					 DMA_DEV_TO_MEM,
1182 					 DMA_PREP_INTERRUPT);
1183 	desc->callback = atmel_complete_rx_dma;
1184 	desc->callback_param = port;
1185 	atmel_port->desc_rx = desc;
1186 	atmel_port->cookie_rx = dmaengine_submit(desc);
1187 
1188 	return 0;
1189 
1190 chan_err:
1191 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1192 	atmel_port->use_dma_rx = 0;
1193 	if (atmel_port->chan_rx)
1194 		atmel_release_rx_dma(port);
1195 	return -EINVAL;
1196 }
1197 
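/*
 * RX polling timer, used when the IP has no hardware receiver timeout (see
 * atmel_startup()): periodically kick the RX tasklet so buffered data gets
 * pushed to the tty layer.
 */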
1198 static void atmel_uart_timer_callback(unsigned long data)
1199 {
1200 	struct uart_port *port = (void *)data;
1201 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1202 
1203 	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1204 		tasklet_schedule(&atmel_port->tasklet_rx);
1205 		mod_timer(&atmel_port->uart_timer,
1206 			  jiffies + uart_poll_timeout(port));
1207 	}
1208 }
1209 
1210 /*
1211  * receive interrupt handler.
1212  */
1213 static void
1214 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1215 {
1216 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1217 
1218 	if (atmel_use_pdc_rx(port)) {
1219 		/*
1220 		 * PDC receive. Just schedule the tasklet and let it
1221 		 * figure out the details.
1222 		 *
1223 		 * TODO: We're not handling error flags correctly at
1224 		 * the moment.
1225 		 */
1226 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1227 			atmel_uart_writel(port, ATMEL_US_IDR,
1228 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1229 			atmel_tasklet_schedule(atmel_port,
1230 					       &atmel_port->tasklet_rx);
1231 		}
1232 
1233 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1234 				ATMEL_US_FRAME | ATMEL_US_PARE))
1235 			atmel_pdc_rxerr(port, pending);
1236 	}
1237 
1238 	if (atmel_use_dma_rx(port)) {
1239 		if (pending & ATMEL_US_TIMEOUT) {
1240 			atmel_uart_writel(port, ATMEL_US_IDR,
1241 					  ATMEL_US_TIMEOUT);
1242 			atmel_tasklet_schedule(atmel_port,
1243 					       &atmel_port->tasklet_rx);
1244 		}
1245 	}
1246 
1247 	/* Interrupt receive */
1248 	if (pending & ATMEL_US_RXRDY)
1249 		atmel_rx_chars(port);
1250 	else if (pending & ATMEL_US_RXBRK) {
1251 		/*
1252 		 * End of break detected. If it came along with a
1253 		 * character, atmel_rx_chars will handle it.
1254 		 */
1255 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1256 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1257 		atmel_port->break_active = 0;
1258 	}
1259 }
1260 
1261 /*
1262  * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1263  */
1264 static void
1265 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1266 {
1267 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1268 
1269 	if (pending & atmel_port->tx_done_mask) {
1270 		/* Either PDC or interrupt transmission */
1271 		atmel_uart_writel(port, ATMEL_US_IDR,
1272 				  atmel_port->tx_done_mask);
1273 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1274 	}
1275 }
1276 
1277 /*
1278  * status flags interrupt handler.
1279  */
1280 static void
1281 atmel_handle_status(struct uart_port *port, unsigned int pending,
1282 		    unsigned int status)
1283 {
1284 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1285 	unsigned int status_change;
1286 
1287 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1288 				| ATMEL_US_CTSIC)) {
1289 		status_change = status ^ atmel_port->irq_status_prev;
1290 		atmel_port->irq_status_prev = status;
1291 
1292 		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1293 					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1294 			/* TODO: All reads to CSR will clear these interrupts! */
1295 			if (status_change & ATMEL_US_RI)
1296 				port->icount.rng++;
1297 			if (status_change & ATMEL_US_DSR)
1298 				port->icount.dsr++;
1299 			if (status_change & ATMEL_US_DCD)
1300 				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1301 			if (status_change & ATMEL_US_CTS)
1302 				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1303 
1304 			wake_up_interruptible(&port->state->port.delta_msr_wait);
1305 		}
1306 	}
1307 }
1308 
1309 /*
1310  * Interrupt handler
1311  */
1312 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1313 {
1314 	struct uart_port *port = dev_id;
1315 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1316 	unsigned int status, pending, mask, pass_counter = 0;
1317 
1318 	spin_lock(&atmel_port->lock_suspended);
1319 
1320 	do {
1321 		status = atmel_get_lines_status(port);
1322 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1323 		pending = status & mask;
1324 		if (!pending)
1325 			break;
1326 
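		/*
		 * While the port is suspended, latch the pending flags so
		 * they can be handled once the port is resumed, mask further
		 * interrupts and flag a system wakeup.
		 */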
1327 		if (atmel_port->suspended) {
1328 			atmel_port->pending |= pending;
1329 			atmel_port->pending_status = status;
1330 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1331 			pm_system_wakeup();
1332 			break;
1333 		}
1334 
1335 		atmel_handle_receive(port, pending);
1336 		atmel_handle_status(port, pending, status);
1337 		atmel_handle_transmit(port, pending);
1338 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1339 
1340 	spin_unlock(&atmel_port->lock_suspended);
1341 
1342 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1343 }
1344 
1345 static void atmel_release_tx_pdc(struct uart_port *port)
1346 {
1347 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1348 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1349 
1350 	dma_unmap_single(port->dev,
1351 			 pdc->dma_addr,
1352 			 pdc->dma_size,
1353 			 DMA_TO_DEVICE);
1354 }
1355 
1356 /*
1357  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1358  */
1359 static void atmel_tx_pdc(struct uart_port *port)
1360 {
1361 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1362 	struct circ_buf *xmit = &port->state->xmit;
1363 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1364 	int count;
1365 
1366 	/* previous PDC transfer still in progress? nothing to do yet */
1367 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1368 		return;
1369 
1370 	xmit->tail += pdc->ofs;
1371 	xmit->tail &= UART_XMIT_SIZE - 1;
1372 
1373 	port->icount.tx += pdc->ofs;
1374 	pdc->ofs = 0;
1375 
1376 	/* more to transmit - setup next transfer */
1377 
1378 	/* disable PDC transmit */
1379 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1380 
1381 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1382 		dma_sync_single_for_device(port->dev,
1383 					   pdc->dma_addr,
1384 					   pdc->dma_size,
1385 					   DMA_TO_DEVICE);
1386 
1387 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1388 		pdc->ofs = count;
1389 
1390 		atmel_uart_writel(port, ATMEL_PDC_TPR,
1391 				  pdc->dma_addr + xmit->tail);
1392 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1393 		/* re-enable PDC transmit */
1394 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1395 		/* Enable interrupts */
1396 		atmel_uart_writel(port, ATMEL_US_IER,
1397 				  atmel_port->tx_done_mask);
1398 	} else {
1399 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
1400 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
1401 			/* DMA done, stop TX, start RX for RS485 */
1402 			atmel_start_rx(port);
1403 		}
1404 	}
1405 
1406 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1407 		uart_write_wakeup(port);
1408 }
1409 
1410 static int atmel_prepare_tx_pdc(struct uart_port *port)
1411 {
1412 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1413 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1414 	struct circ_buf *xmit = &port->state->xmit;
1415 
1416 	pdc->buf = xmit->buf;
1417 	pdc->dma_addr = dma_map_single(port->dev,
1418 					pdc->buf,
1419 					UART_XMIT_SIZE,
1420 					DMA_TO_DEVICE);
1421 	pdc->dma_size = UART_XMIT_SIZE;
1422 	pdc->ofs = 0;
1423 
1424 	return 0;
1425 }
1426 
1427 static void atmel_rx_from_ring(struct uart_port *port)
1428 {
1429 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1430 	struct circ_buf *ring = &atmel_port->rx_ring;
1431 	unsigned int flg;
1432 	unsigned int status;
1433 
1434 	while (ring->head != ring->tail) {
1435 		struct atmel_uart_char c;
1436 
1437 		/* Make sure c is loaded after head. */
1438 		smp_rmb();
1439 
1440 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1441 
1442 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1443 
1444 		port->icount.rx++;
1445 		status = c.status;
1446 		flg = TTY_NORMAL;
1447 
1448 		/*
1449 		 * note that the error handling code is
1450 		 * out of the main execution path
1451 		 */
1452 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1453 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1454 			if (status & ATMEL_US_RXBRK) {
1455 				/* ignore side-effect */
1456 				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1457 
1458 				port->icount.brk++;
1459 				if (uart_handle_break(port))
1460 					continue;
1461 			}
1462 			if (status & ATMEL_US_PARE)
1463 				port->icount.parity++;
1464 			if (status & ATMEL_US_FRAME)
1465 				port->icount.frame++;
1466 			if (status & ATMEL_US_OVRE)
1467 				port->icount.overrun++;
1468 
1469 			status &= port->read_status_mask;
1470 
1471 			if (status & ATMEL_US_RXBRK)
1472 				flg = TTY_BREAK;
1473 			else if (status & ATMEL_US_PARE)
1474 				flg = TTY_PARITY;
1475 			else if (status & ATMEL_US_FRAME)
1476 				flg = TTY_FRAME;
1477 		}
1478 
1479 
1480 		if (uart_handle_sysrq_char(port, c.ch))
1481 			continue;
1482 
1483 		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1484 	}
1485 
1486 	/*
1487 	 * Drop the lock here since it might end up calling
1488 	 * uart_start(), which takes the lock.
1489 	 */
1490 	spin_unlock(&port->lock);
1491 	tty_flip_buffer_push(&port->state->port);
1492 	spin_lock(&port->lock);
1493 }
1494 
1495 static void atmel_release_rx_pdc(struct uart_port *port)
1496 {
1497 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1498 	int i;
1499 
1500 	for (i = 0; i < 2; i++) {
1501 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1502 
1503 		dma_unmap_single(port->dev,
1504 				 pdc->dma_addr,
1505 				 pdc->dma_size,
1506 				 DMA_FROM_DEVICE);
1507 		kfree(pdc->buf);
1508 	}
1509 }
1510 
1511 static void atmel_rx_from_pdc(struct uart_port *port)
1512 {
1513 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1514 	struct tty_port *tport = &port->state->port;
1515 	struct atmel_dma_buffer *pdc;
1516 	int rx_idx = atmel_port->pdc_rx_idx;
1517 	unsigned int head;
1518 	unsigned int tail;
1519 	unsigned int count;
1520 
1521 	do {
1522 		/* Reset the UART timeout early so that we don't miss one */
1523 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1524 
1525 		pdc = &atmel_port->pdc_rx[rx_idx];
1526 		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1527 		tail = pdc->ofs;
1528 
1529 		/* If the PDC has switched buffers, RPR won't contain
1530 		 * any address within the current buffer. Since head
1531 		 * is unsigned, we just need a one-way comparison to
1532 		 * find out.
1533 		 *
1534 		 * In this case, we just need to consume the entire
1535 		 * buffer and resubmit it for DMA. This will clear the
1536 		 * ENDRX bit as well, so that we can safely re-enable
1537 		 * all interrupts below.
1538 		 */
1539 		head = min(head, pdc->dma_size);
1540 
1541 		if (likely(head != tail)) {
1542 			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1543 					pdc->dma_size, DMA_FROM_DEVICE);
1544 
1545 			/*
1546 			 * head will only wrap around when we recycle
1547 			 * the DMA buffer, and when that happens, we
1548 			 * explicitly set tail to 0. So head will
1549 			 * always be greater than tail.
1550 			 */
1551 			count = head - tail;
1552 
1553 			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1554 						count);
1555 
1556 			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1557 					pdc->dma_size, DMA_FROM_DEVICE);
1558 
1559 			port->icount.rx += count;
1560 			pdc->ofs = head;
1561 		}
1562 
1563 		/*
1564 		 * If the current buffer is full, we need to check if
1565 		 * the next one contains any additional data.
1566 		 */
1567 		if (head >= pdc->dma_size) {
1568 			pdc->ofs = 0;
1569 			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1570 			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1571 
1572 			rx_idx = !rx_idx;
1573 			atmel_port->pdc_rx_idx = rx_idx;
1574 		}
1575 	} while (head >= pdc->dma_size);
1576 
1577 	/*
1578 	 * Drop the lock here since it might end up calling
1579 	 * uart_start(), which takes the lock.
1580 	 */
1581 	spin_unlock(&port->lock);
1582 	tty_flip_buffer_push(tport);
1583 	spin_lock(&port->lock);
1584 
1585 	atmel_uart_writel(port, ATMEL_US_IER,
1586 			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1587 }
1588 
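/*
 * Allocate and map the two PDC RX bounce buffers, then program both the
 * current (RPR/RCR) and next (RNPR/RNCR) buffer pointer registers.
 */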
1589 static int atmel_prepare_rx_pdc(struct uart_port *port)
1590 {
1591 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1592 	int i;
1593 
1594 	for (i = 0; i < 2; i++) {
1595 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1596 
1597 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1598 		if (pdc->buf == NULL) {
1599 			if (i != 0) {
1600 				dma_unmap_single(port->dev,
1601 					atmel_port->pdc_rx[0].dma_addr,
1602 					PDC_BUFFER_SIZE,
1603 					DMA_FROM_DEVICE);
1604 				kfree(atmel_port->pdc_rx[0].buf);
1605 			}
1606 			atmel_port->use_pdc_rx = 0;
1607 			return -ENOMEM;
1608 		}
1609 		pdc->dma_addr = dma_map_single(port->dev,
1610 						pdc->buf,
1611 						PDC_BUFFER_SIZE,
1612 						DMA_FROM_DEVICE);
1613 		pdc->dma_size = PDC_BUFFER_SIZE;
1614 		pdc->ofs = 0;
1615 	}
1616 
1617 	atmel_port->pdc_rx_idx = 0;
1618 
1619 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1620 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1621 
1622 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1623 			  atmel_port->pdc_rx[1].dma_addr);
1624 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1625 
1626 	return 0;
1627 }
1628 
1629 /*
1630  * tasklet handling tty stuff outside the interrupt handler.
1631  */
1632 static void atmel_tasklet_rx_func(unsigned long data)
1633 {
1634 	struct uart_port *port = (struct uart_port *)data;
1635 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1636 
1637 	/* The interrupt handler does not take the lock */
1638 	spin_lock(&port->lock);
1639 	atmel_port->schedule_rx(port);
1640 	spin_unlock(&port->lock);
1641 }
1642 
1643 static void atmel_tasklet_tx_func(unsigned long data)
1644 {
1645 	struct uart_port *port = (struct uart_port *)data;
1646 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1647 
1648 	/* The interrupt handler does not take the lock */
1649 	spin_lock(&port->lock);
1650 	atmel_port->schedule_tx(port);
1651 	spin_unlock(&port->lock);
1652 }
1653 
1654 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1655 				struct platform_device *pdev)
1656 {
1657 	struct device_node *np = pdev->dev.of_node;
1658 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1659 
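	/*
	 * Transfer mode selection from the device tree:
	 * "atmel,use-dma-rx"/"atmel,use-dma-tx" together with a "dmas"
	 * property selects the DMA controller, the same properties without
	 * "dmas" select the PDC, otherwise the port falls back to PIO.
	 */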
1660 	if (np) {
1661 		/* DMA/PDC usage specification */
1662 		if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1663 			if (of_property_read_bool(np, "dmas")) {
1664 				atmel_port->use_dma_rx  = true;
1665 				atmel_port->use_pdc_rx  = false;
1666 			} else {
1667 				atmel_port->use_dma_rx  = false;
1668 				atmel_port->use_pdc_rx  = true;
1669 			}
1670 		} else {
1671 			atmel_port->use_dma_rx  = false;
1672 			atmel_port->use_pdc_rx  = false;
1673 		}
1674 
1675 		if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1676 			if (of_property_read_bool(np, "dmas")) {
1677 				atmel_port->use_dma_tx  = true;
1678 				atmel_port->use_pdc_tx  = false;
1679 			} else {
1680 				atmel_port->use_dma_tx  = false;
1681 				atmel_port->use_pdc_tx  = true;
1682 			}
1683 		} else {
1684 			atmel_port->use_dma_tx  = false;
1685 			atmel_port->use_pdc_tx  = false;
1686 		}
1687 
1688 	} else {
1689 		atmel_port->use_pdc_rx  = pdata->use_dma_rx;
1690 		atmel_port->use_pdc_tx  = pdata->use_dma_tx;
1691 		atmel_port->use_dma_rx  = false;
1692 		atmel_port->use_dma_tx  = false;
1693 	}
1694 
1695 }
1696 
1697 static void atmel_init_rs485(struct uart_port *port,
1698 				struct platform_device *pdev)
1699 {
1700 	struct device_node *np = pdev->dev.of_node;
1701 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1702 
1703 	if (np) {
1704 		struct serial_rs485 *rs485conf = &port->rs485;
1705 		u32 rs485_delay[2];
1706 		/* rs485 properties */
1707 		if (of_property_read_u32_array(np, "rs485-rts-delay",
1708 					rs485_delay, 2) == 0) {
1709 			rs485conf->delay_rts_before_send = rs485_delay[0];
1710 			rs485conf->delay_rts_after_send = rs485_delay[1];
1711 			rs485conf->flags = 0;
1712 		}
1713 
1714 		if (of_get_property(np, "rs485-rx-during-tx", NULL))
1715 			rs485conf->flags |= SER_RS485_RX_DURING_TX;
1716 
1717 		if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
1718 								NULL))
1719 			rs485conf->flags |= SER_RS485_ENABLED;
1720 	} else {
1721 		port->rs485       = pdata->rs485;
1722 	}
1723 
1724 }
1725 
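/*
 * Bind the prepare/schedule/release hooks to the transfer mode selected by
 * atmel_init_property(): DMA, PDC or plain PIO.
 */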
1726 static void atmel_set_ops(struct uart_port *port)
1727 {
1728 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1729 
1730 	if (atmel_use_dma_rx(port)) {
1731 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1732 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1733 		atmel_port->release_rx = &atmel_release_rx_dma;
1734 	} else if (atmel_use_pdc_rx(port)) {
1735 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1736 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1737 		atmel_port->release_rx = &atmel_release_rx_pdc;
1738 	} else {
1739 		atmel_port->prepare_rx = NULL;
1740 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1741 		atmel_port->release_rx = NULL;
1742 	}
1743 
1744 	if (atmel_use_dma_tx(port)) {
1745 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1746 		atmel_port->schedule_tx = &atmel_tx_dma;
1747 		atmel_port->release_tx = &atmel_release_tx_dma;
1748 	} else if (atmel_use_pdc_tx(port)) {
1749 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1750 		atmel_port->schedule_tx = &atmel_tx_pdc;
1751 		atmel_port->release_tx = &atmel_release_tx_pdc;
1752 	} else {
1753 		atmel_port->prepare_tx = NULL;
1754 		atmel_port->schedule_tx = &atmel_tx_chars;
1755 		atmel_port->release_tx = NULL;
1756 	}
1757 }
1758 
1759 /*
1760  * Get the IP name, usart or uart
1761  */
1762 static void atmel_get_ip_name(struct uart_port *port)
1763 {
1764 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1765 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1766 	u32 version;
1767 	u32 usart, dbgu_uart, new_uart;
1768 	/* ASCII codes of the possible IP names */
1769 	usart = 0x55534152;	/* USAR(T) */
1770 	dbgu_uart = 0x44424755;	/* DBGU */
1771 	new_uart = 0x55415254;	/* UART */
1772 
1773 	/*
1774 	 * Only USART devices from at91sam9260 SOC implement fractional
1775 	 * baudrate. It is available for all asynchronous modes, with the
1776 	 * following restriction: the sampling clock's duty cycle is not
1777 	 * constant.
1778 	 */
1779 	atmel_port->has_frac_baudrate = false;
1780 	atmel_port->has_hw_timer = false;
1781 
1782 	if (name == new_uart) {
1783 		dev_dbg(port->dev, "Uart with hw timer");
1784 		atmel_port->has_hw_timer = true;
1785 		atmel_port->rtor = ATMEL_UA_RTOR;
1786 	} else if (name == usart) {
1787 		dev_dbg(port->dev, "Usart\n");
1788 		atmel_port->has_frac_baudrate = true;
1789 		atmel_port->has_hw_timer = true;
1790 		atmel_port->rtor = ATMEL_US_RTOR;
1791 	} else if (name == dbgu_uart) {
1792 		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1793 	} else {
1794 		/* fallback for older SoCs: use version field */
1795 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1796 		switch (version) {
1797 		case 0x302:
1798 		case 0x10213:
1799 			dev_dbg(port->dev, "This version is usart\n");
1800 			atmel_port->has_frac_baudrate = true;
1801 			atmel_port->has_hw_timer = true;
1802 			atmel_port->rtor = ATMEL_US_RTOR;
1803 			break;
1804 		case 0x203:
1805 		case 0x10202:
1806 			dev_dbg(port->dev, "This version is uart\n");
1807 			break;
1808 		default:
1809 			dev_err(port->dev, "Unsupported IP name or version, defaulting to uart\n");
1810 		}
1811 	}
1812 }
1813 
1814 /*
1815  * Perform initialization and enable port for reception
1816  */
1817 static int atmel_startup(struct uart_port *port)
1818 {
1819 	struct platform_device *pdev = to_platform_device(port->dev);
1820 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1821 	struct tty_struct *tty = port->state->port.tty;
1822 	int retval;
1823 
1824 	/*
1825 	 * Ensure that no interrupts are enabled otherwise when
1826 	 * request_irq() is called we could get stuck trying to
1827 	 * handle an unexpected interrupt
1828 	 */
1829 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1830 	atmel_port->ms_irq_enabled = false;
1831 
1832 	/*
1833 	 * Allocate the IRQ
1834 	 */
1835 	retval = request_irq(port->irq, atmel_interrupt,
1836 			IRQF_SHARED | IRQF_COND_SUSPEND,
1837 			tty ? tty->name : "atmel_serial", port);
1838 	if (retval) {
1839 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1840 		return retval;
1841 	}
1842 
1843 	atomic_set(&atmel_port->tasklet_shutdown, 0);
1844 	tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
1845 			(unsigned long)port);
1846 	tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
1847 			(unsigned long)port);
1848 
1849 	/*
1850 	 * Initialize DMA (if necessary)
1851 	 */
1852 	atmel_init_property(atmel_port, pdev);
1853 	atmel_set_ops(port);
1854 
1855 	if (atmel_port->prepare_rx) {
1856 		retval = atmel_port->prepare_rx(port);
1857 		if (retval < 0)
1858 			atmel_set_ops(port);
1859 	}
1860 
1861 	if (atmel_port->prepare_tx) {
1862 		retval = atmel_port->prepare_tx(port);
1863 		if (retval < 0)
1864 			atmel_set_ops(port);
1865 	}
1866 
1867 	/*
1868 	 * Enable FIFO when available
1869 	 */
1870 	if (atmel_port->fifo_size) {
1871 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1872 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1873 		unsigned int fmr;
1874 
1875 		atmel_uart_writel(port, ATMEL_US_CR,
1876 				  ATMEL_US_FIFOEN |
1877 				  ATMEL_US_RXFCLR |
1878 				  ATMEL_US_TXFLCLR);
1879 
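		/*
		 * When DMA drives the TX path, raise TXRDY only once at least
		 * four data can be written, so the DMA controller can push
		 * small bursts instead of single bytes (assumption based on
		 * the TXRDYM field semantics; PIO keeps the one-data default).
		 */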
1880 		if (atmel_use_dma_tx(port))
1881 			txrdym = ATMEL_US_FOUR_DATA;
1882 
1883 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1884 		if (atmel_port->rts_high &&
1885 		    atmel_port->rts_low)
1886 			fmr |=	ATMEL_US_FRTSC |
1887 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1888 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1889 
1890 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1891 	}
1892 
1893 	/* Save the current CSR for later comparison when handling status changes */
1894 	atmel_port->irq_status_prev = atmel_get_lines_status(port);
1895 
1896 	/*
1897 	 * Finally, enable the serial port
1898 	 */
1899 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1900 	/* enable xmit & rcvr */
1901 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1902 
1903 	setup_timer(&atmel_port->uart_timer,
1904 			atmel_uart_timer_callback,
1905 			(unsigned long)port);
1906 
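	/*
	 * Ports without a hardware receive-timeout counter (RTOR) fall back
	 * on this kernel timer to periodically poll for received data; ports
	 * with one use the USART timeout interrupt instead.
	 */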
1907 	if (atmel_use_pdc_rx(port)) {
1908 		/* set UART timeout */
1909 		if (!atmel_port->has_hw_timer) {
1910 			mod_timer(&atmel_port->uart_timer,
1911 					jiffies + uart_poll_timeout(port));
1912 		/* set USART timeout */
1913 		} else {
1914 			atmel_uart_writel(port, atmel_port->rtor,
1915 					  PDC_RX_TIMEOUT);
1916 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1917 
1918 			atmel_uart_writel(port, ATMEL_US_IER,
1919 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1920 		}
1921 		/* enable PDC controller */
1922 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1923 	} else if (atmel_use_dma_rx(port)) {
1924 		/* set UART timeout */
1925 		if (!atmel_port->has_hw_timer) {
1926 			mod_timer(&atmel_port->uart_timer,
1927 					jiffies + uart_poll_timeout(port));
1928 		/* set USART timeout */
1929 		} else {
1930 			atmel_uart_writel(port, atmel_port->rtor,
1931 					  PDC_RX_TIMEOUT);
1932 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1933 
1934 			atmel_uart_writel(port, ATMEL_US_IER,
1935 					  ATMEL_US_TIMEOUT);
1936 		}
1937 	} else {
1938 		/* enable receive only */
1939 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1940 	}
1941 
1942 	return 0;
1943 }
1944 
1945 /*
1946  * Flush any TX data submitted for DMA. Called when the TX circular
1947  * buffer is reset.
1948  */
1949 static void atmel_flush_buffer(struct uart_port *port)
1950 {
1951 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1952 
1953 	if (atmel_use_pdc_tx(port)) {
1954 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1955 		atmel_port->pdc_tx.ofs = 0;
1956 	}
1957 	/*
1958 	 * in uart_flush_buffer(), the xmit circular buffer has just
1959 	 * been cleared, so we have to reset tx_len accordingly.
1960 	 */
1961 	atmel_port->tx_len = 0;
1962 }
1963 
1964 /*
1965  * Disable the port
1966  */
1967 static void atmel_shutdown(struct uart_port *port)
1968 {
1969 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1970 
1971 	/* Disable modem control lines interrupts */
1972 	atmel_disable_ms(port);
1973 
1974 	/* Disable interrupts at device level */
1975 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1976 
1977 	/* Prevent spurious interrupts from scheduling the tasklet */
1978 	atomic_inc(&atmel_port->tasklet_shutdown);
1979 
1980 	/*
1981 	 * Prevent any tasklets being scheduled during
1982 	 * cleanup
1983 	 */
1984 	del_timer_sync(&atmel_port->uart_timer);
1985 
1986 	/* Make sure that no interrupt is on the fly */
1987 	synchronize_irq(port->irq);
1988 
1989 	/*
1990 	 * Clear out any scheduled tasklets before
1991 	 * we destroy the buffers
1992 	 */
1993 	tasklet_kill(&atmel_port->tasklet_rx);
1994 	tasklet_kill(&atmel_port->tasklet_tx);
1995 
1996 	/*
1997 	 * Ensure everything is stopped and
1998 	 * disable port and break condition.
1999 	 */
2000 	atmel_stop_rx(port);
2001 	atmel_stop_tx(port);
2002 
2003 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2004 
2005 	/*
2006 	 * Shut down the DMA.
2007 	 */
2008 	if (atmel_port->release_rx)
2009 		atmel_port->release_rx(port);
2010 	if (atmel_port->release_tx)
2011 		atmel_port->release_tx(port);
2012 
2013 	/*
2014 	 * Reset ring buffer pointers
2015 	 */
2016 	atmel_port->rx_ring.head = 0;
2017 	atmel_port->rx_ring.tail = 0;
2018 
2019 	/*
2020 	 * Free the interrupts
2021 	 */
2022 	free_irq(port->irq, port);
2023 
2024 	atmel_flush_buffer(port);
2025 }
2026 
2027 /*
2028  * Power / Clock management.
2029  */
2030 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2031 			    unsigned int oldstate)
2032 {
2033 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2034 
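	/*
	 * The serial core passes enum uart_pm_state values here:
	 * 0 is UART_PM_STATE_ON, 3 is UART_PM_STATE_OFF.
	 */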
2035 	switch (state) {
2036 	case 0:
2037 		/*
2038 		 * Enable the peripheral clock for this serial port.
2039 		 * This is called on uart_open() or a resume event.
2040 		 */
2041 		clk_prepare_enable(atmel_port->clk);
2042 
2043 		/* re-enable interrupts if we disabled some on suspend */
2044 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2045 		break;
2046 	case 3:
2047 		/* Back up the interrupt mask and disable all interrupts */
2048 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2049 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
2050 
2051 		/*
2052 		 * Disable the peripheral clock for this serial port.
2053 		 * This is called on uart_close() or a suspend event.
2054 		 */
2055 		clk_disable_unprepare(atmel_port->clk);
2056 		break;
2057 	default:
2058 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2059 	}
2060 }
2061 
2062 /*
2063  * Change the port parameters
2064  */
2065 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2066 			      struct ktermios *old)
2067 {
2068 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2069 	unsigned long flags;
2070 	unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
2071 
2072 	/* save the current mode register */
2073 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2074 
2075 	/* reset the mode, clock divisor, parity, stop bits and data size */
2076 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2077 		  ATMEL_US_PAR | ATMEL_US_USMODE);
2078 
2079 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2080 
2081 	/* byte size */
2082 	switch (termios->c_cflag & CSIZE) {
2083 	case CS5:
2084 		mode |= ATMEL_US_CHRL_5;
2085 		break;
2086 	case CS6:
2087 		mode |= ATMEL_US_CHRL_6;
2088 		break;
2089 	case CS7:
2090 		mode |= ATMEL_US_CHRL_7;
2091 		break;
2092 	default:
2093 		mode |= ATMEL_US_CHRL_8;
2094 		break;
2095 	}
2096 
2097 	/* stop bits */
2098 	if (termios->c_cflag & CSTOPB)
2099 		mode |= ATMEL_US_NBSTOP_2;
2100 
2101 	/* parity */
2102 	if (termios->c_cflag & PARENB) {
2103 		/* Mark or Space parity */
2104 		if (termios->c_cflag & CMSPAR) {
2105 			if (termios->c_cflag & PARODD)
2106 				mode |= ATMEL_US_PAR_MARK;
2107 			else
2108 				mode |= ATMEL_US_PAR_SPACE;
2109 		} else if (termios->c_cflag & PARODD)
2110 			mode |= ATMEL_US_PAR_ODD;
2111 		else
2112 			mode |= ATMEL_US_PAR_EVEN;
2113 	} else
2114 		mode |= ATMEL_US_PAR_NONE;
2115 
2116 	spin_lock_irqsave(&port->lock, flags);
2117 
2118 	port->read_status_mask = ATMEL_US_OVRE;
2119 	if (termios->c_iflag & INPCK)
2120 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2121 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2122 		port->read_status_mask |= ATMEL_US_RXBRK;
2123 
2124 	if (atmel_use_pdc_rx(port))
2125 		/* need to enable error interrupts */
2126 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2127 
2128 	/*
2129 	 * Characters to ignore
2130 	 */
2131 	port->ignore_status_mask = 0;
2132 	if (termios->c_iflag & IGNPAR)
2133 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2134 	if (termios->c_iflag & IGNBRK) {
2135 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2136 		/*
2137 		 * If we're ignoring parity and break indicators,
2138 		 * ignore overruns too (for real raw support).
2139 		 */
2140 		if (termios->c_iflag & IGNPAR)
2141 			port->ignore_status_mask |= ATMEL_US_OVRE;
2142 	}
2143 	/* TODO: Ignore all characters if CREAD is not set. */
2144 
2145 	/* update the per-port timeout */
2146 	uart_update_timeout(port, termios->c_cflag, baud);
2147 
2148 	/*
2149 	 * save/disable interrupts. The tty layer will ensure that the
2150 	 * transmitter is empty if requested by the caller, so there's
2151 	 * no need to wait for it here.
2152 	 */
2153 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2154 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2155 
2156 	/* disable receiver and transmitter */
2157 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2158 
2159 	/* mode */
2160 	if (port->rs485.flags & SER_RS485_ENABLED) {
2161 		atmel_uart_writel(port, ATMEL_US_TTGR,
2162 				  port->rs485.delay_rts_after_send);
2163 		mode |= ATMEL_US_USMODE_RS485;
2164 	} else if (termios->c_cflag & CRTSCTS) {
2165 		/* RS232 with hardware handshake (RTS/CTS) */
2166 		if (atmel_use_fifo(port) &&
2167 		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2168 			/*
2169 			 * With ATMEL_US_USMODE_HWHS set, the controller can
2170 			 * drive the RTS pin high/low when the RX FIFO is
2171 			 * above RXFTHRES/below RXFTHRES2.
2172 			 * It will also disable the transmitter when the CTS
2173 			 * pin is high.
2174 			 * This mode is not activated if the CTS pin is a
2175 			 * GPIO because, in this case, the transmitter would
2176 			 * always be disabled (there must be an internal
2177 			 * pull-up responsible for this behaviour).
2178 			 * If the RTS pin is a GPIO, the controller won't be
2179 			 * able to drive it according to the FIFO thresholds,
2180 			 * but it will be handled by the driver instead.
2181 			 */
2182 			mode |= ATMEL_US_USMODE_HWHS;
2183 		} else {
2184 			/*
2185 			 * For platforms without FIFO, the flow control is
2186 			 * handled by the driver.
2187 			 */
2188 			mode |= ATMEL_US_USMODE_NORMAL;
2189 		}
2190 	} else {
2191 		/* RS232 without hardware handshake */
2192 		mode |= ATMEL_US_USMODE_NORMAL;
2193 	}
2194 
2195 	/* set the mode, clock divisor, parity, stop bits and data size */
2196 	atmel_uart_writel(port, ATMEL_US_MR, mode);
2197 
2198 	/*
2199 	 * when switching the mode, set the RTS line state according to the
2200 	 * new mode, otherwise keep the former state
2201 	 */
2202 	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2203 		unsigned int rts_state;
2204 
2205 		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2206 			/* let the hardware control the RTS line */
2207 			rts_state = ATMEL_US_RTSDIS;
2208 		} else {
2209 			/* force RTS line to low level */
2210 			rts_state = ATMEL_US_RTSEN;
2211 		}
2212 
2213 		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2214 	}
2215 
2216 	/*
2217 	 * Set the baud rate:
2218 	 * The fractional baud rate generator allows the output frequency to
2219 	 * be set more accurately. This feature is enabled only in normal mode.
2220 	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2221 	 * Currently, OVER is always set to 0, so we get
2222 	 * baudrate = selected clock / (16 * (CD + FP / 8))
2223 	 * and therefore
2224 	 * 8 * CD + FP = selected clock / (2 * baudrate)
2225 	 */
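	/*
	 * Worked example (assuming uartclk = 132 MHz, baud = 115200):
	 * div = round(132000000 / 230400) = 573, hence CD = 573 >> 3 = 71 and
	 * FP = 573 & 7 = 5, for an actual rate of 132000000 / (16 * 71.625),
	 * i.e. about 115183 Bd (roughly 0.015% off the requested rate).
	 */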
2226 	if (atmel_port->has_frac_baudrate) {
2227 		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2228 		cd = div >> 3;
2229 		fp = div & ATMEL_US_FP_MASK;
2230 	} else {
2231 		cd = uart_get_divisor(port, baud);
2232 	}
2233 
2234 	if (cd > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
2235 		cd /= 8;
2236 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2237 	}
2238 	quot = cd | fp << ATMEL_US_FP_OFFSET;
2239 
2240 	atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2241 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2242 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2243 
2244 	/* restore interrupts */
2245 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2246 
2247 	/* CTS flow-control and modem-status interrupts */
2248 	if (UART_ENABLE_MS(port, termios->c_cflag))
2249 		atmel_enable_ms(port);
2250 	else
2251 		atmel_disable_ms(port);
2252 
2253 	spin_unlock_irqrestore(&port->lock, flags);
2254 }
2255 
2256 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2257 {
2258 	if (termios->c_line == N_PPS) {
2259 		port->flags |= UPF_HARDPPS_CD;
2260 		spin_lock_irq(&port->lock);
2261 		atmel_enable_ms(port);
2262 		spin_unlock_irq(&port->lock);
2263 	} else {
2264 		port->flags &= ~UPF_HARDPPS_CD;
2265 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2266 			spin_lock_irq(&port->lock);
2267 			atmel_disable_ms(port);
2268 			spin_unlock_irq(&port->lock);
2269 		}
2270 	}
2271 }
2272 
2273 /*
2274  * Return string describing the specified port
2275  */
2276 static const char *atmel_type(struct uart_port *port)
2277 {
2278 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2279 }
2280 
2281 /*
2282  * Release the memory region(s) being used by 'port'.
2283  */
2284 static void atmel_release_port(struct uart_port *port)
2285 {
2286 	struct platform_device *pdev = to_platform_device(port->dev);
2287 	int size = resource_size(&pdev->resource[0]);
2288 
2289 	release_mem_region(port->mapbase, size);
2290 
2291 	if (port->flags & UPF_IOREMAP) {
2292 		iounmap(port->membase);
2293 		port->membase = NULL;
2294 	}
2295 }
2296 
2297 /*
2298  * Request the memory region(s) being used by 'port'.
2299  */
2300 static int atmel_request_port(struct uart_port *port)
2301 {
2302 	struct platform_device *pdev = to_platform_device(port->dev);
2303 	int size = resource_size(&pdev->resource[0]);
2304 
2305 	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2306 		return -EBUSY;
2307 
2308 	if (port->flags & UPF_IOREMAP) {
2309 		port->membase = ioremap(port->mapbase, size);
2310 		if (port->membase == NULL) {
2311 			release_mem_region(port->mapbase, size);
2312 			return -ENOMEM;
2313 		}
2314 	}
2315 
2316 	return 0;
2317 }
2318 
2319 /*
2320  * Configure/autoconfigure the port.
2321  */
2322 static void atmel_config_port(struct uart_port *port, int flags)
2323 {
2324 	if (flags & UART_CONFIG_TYPE) {
2325 		port->type = PORT_ATMEL;
2326 		atmel_request_port(port);
2327 	}
2328 }
2329 
2330 /*
2331  * Verify the new serial_struct (for TIOCSSERIAL).
2332  */
2333 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2334 {
2335 	int ret = 0;
2336 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2337 		ret = -EINVAL;
2338 	if (port->irq != ser->irq)
2339 		ret = -EINVAL;
2340 	if (ser->io_type != SERIAL_IO_MEM)
2341 		ret = -EINVAL;
2342 	if (port->uartclk / 16 != ser->baud_base)
2343 		ret = -EINVAL;
2344 	if (port->mapbase != (unsigned long)ser->iomem_base)
2345 		ret = -EINVAL;
2346 	if (port->iobase != ser->port)
2347 		ret = -EINVAL;
2348 	if (ser->hub6 != 0)
2349 		ret = -EINVAL;
2350 	return ret;
2351 }
2352 
2353 #ifdef CONFIG_CONSOLE_POLL
2354 static int atmel_poll_get_char(struct uart_port *port)
2355 {
2356 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2357 		cpu_relax();
2358 
2359 	return atmel_uart_read_char(port);
2360 }
2361 
2362 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2363 {
2364 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2365 		cpu_relax();
2366 
2367 	atmel_uart_write_char(port, ch);
2368 }
2369 #endif
2370 
2371 static const struct uart_ops atmel_pops = {
2372 	.tx_empty	= atmel_tx_empty,
2373 	.set_mctrl	= atmel_set_mctrl,
2374 	.get_mctrl	= atmel_get_mctrl,
2375 	.stop_tx	= atmel_stop_tx,
2376 	.start_tx	= atmel_start_tx,
2377 	.stop_rx	= atmel_stop_rx,
2378 	.enable_ms	= atmel_enable_ms,
2379 	.break_ctl	= atmel_break_ctl,
2380 	.startup	= atmel_startup,
2381 	.shutdown	= atmel_shutdown,
2382 	.flush_buffer	= atmel_flush_buffer,
2383 	.set_termios	= atmel_set_termios,
2384 	.set_ldisc	= atmel_set_ldisc,
2385 	.type		= atmel_type,
2386 	.release_port	= atmel_release_port,
2387 	.request_port	= atmel_request_port,
2388 	.config_port	= atmel_config_port,
2389 	.verify_port	= atmel_verify_port,
2390 	.pm		= atmel_serial_pm,
2391 #ifdef CONFIG_CONSOLE_POLL
2392 	.poll_get_char	= atmel_poll_get_char,
2393 	.poll_put_char	= atmel_poll_put_char,
2394 #endif
2395 };
2396 
2397 /*
2398  * Configure the port from the platform device resource info.
2399  */
2400 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2401 				      struct platform_device *pdev)
2402 {
2403 	int ret;
2404 	struct uart_port *port = &atmel_port->uart;
2405 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2406 
2407 	atmel_init_property(atmel_port, pdev);
2408 	atmel_set_ops(port);
2409 
2410 	atmel_init_rs485(port, pdev);
2411 
2412 	port->iotype		= UPIO_MEM;
2413 	port->flags		= UPF_BOOT_AUTOCONF;
2414 	port->ops		= &atmel_pops;
2415 	port->fifosize		= 1;
2416 	port->dev		= &pdev->dev;
2417 	port->mapbase	= pdev->resource[0].start;
2418 	port->irq	= pdev->resource[1].start;
2419 	port->rs485_config	= atmel_config_rs485;
2420 
2421 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2422 
2423 	if (pdata && pdata->regs) {
2424 		/* Already mapped by setup code */
2425 		port->membase = pdata->regs;
2426 	} else {
2427 		port->flags	|= UPF_IOREMAP;
2428 		port->membase	= NULL;
2429 	}
2430 
2431 	/* for console, the clock could already be configured */
2432 	if (!atmel_port->clk) {
2433 		atmel_port->clk = clk_get(&pdev->dev, "usart");
2434 		if (IS_ERR(atmel_port->clk)) {
2435 			ret = PTR_ERR(atmel_port->clk);
2436 			atmel_port->clk = NULL;
2437 			return ret;
2438 		}
2439 		ret = clk_prepare_enable(atmel_port->clk);
2440 		if (ret) {
2441 			clk_put(atmel_port->clk);
2442 			atmel_port->clk = NULL;
2443 			return ret;
2444 		}
2445 		port->uartclk = clk_get_rate(atmel_port->clk);
2446 		clk_disable_unprepare(atmel_port->clk);
2447 		/* only enable clock when USART is in use */
2448 	}
2449 
2450 	/* Use TXEMPTY for interrupt when rs485 is enabled, else TXRDY or ENDTX|TXBUFE */
2451 	if (port->rs485.flags & SER_RS485_ENABLED)
2452 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2453 	else if (atmel_use_pdc_tx(port)) {
2454 		port->fifosize = PDC_BUFFER_SIZE;
2455 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2456 	} else {
2457 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2458 	}
2459 
2460 	return 0;
2461 }
2462 
2463 struct platform_device *atmel_default_console_device;	/* the serial console device */
2464 
2465 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2466 static void atmel_console_putchar(struct uart_port *port, int ch)
2467 {
2468 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2469 		cpu_relax();
2470 	atmel_uart_write_char(port, ch);
2471 }
2472 
2473 /*
2474  * Interrupts are disabled on entry
2475  */
2476 static void atmel_console_write(struct console *co, const char *s, u_int count)
2477 {
2478 	struct uart_port *port = &atmel_ports[co->index].uart;
2479 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2480 	unsigned int status, imr;
2481 	unsigned int pdc_tx;
2482 
2483 	/*
2484 	 * First, save IMR and then disable interrupts
2485 	 */
2486 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2487 	atmel_uart_writel(port, ATMEL_US_IDR,
2488 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2489 
2490 	/* Store PDC transmit status and disable it */
2491 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2492 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2493 
2494 	/* Make sure that tx path is actually able to send characters */
2495 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2496 
2497 	uart_console_write(port, s, count, atmel_console_putchar);
2498 
2499 	/*
2500 	 * Finally, wait for transmitter to become empty
2501 	 * and restore IMR
2502 	 */
2503 	do {
2504 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2505 	} while (!(status & ATMEL_US_TXRDY));
2506 
2507 	/* Restore PDC transmit status */
2508 	if (pdc_tx)
2509 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2510 
2511 	/* set interrupts back the way they were */
2512 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2513 }
2514 
2515 /*
2516  * If the port was already initialized (e.g. by a boot loader),
2517  * try to determine the current setup.
2518  */
2519 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2520 					     int *parity, int *bits)
2521 {
2522 	unsigned int mr, quot;
2523 
2524 	/*
2525 	 * If the baud rate generator isn't running, the port wasn't
2526 	 * initialized by the boot loader.
2527 	 */
2528 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2529 	if (!quot)
2530 		return;
2531 
2532 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2533 	if (mr == ATMEL_US_CHRL_8)
2534 		*bits = 8;
2535 	else
2536 		*bits = 7;
2537 
2538 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2539 	if (mr == ATMEL_US_PAR_EVEN)
2540 		*parity = 'e';
2541 	else if (mr == ATMEL_US_PAR_ODD)
2542 		*parity = 'o';
2543 
2544 	/*
2545 	 * The serial core only rounds down when matching this to a
2546 	 * supported baud rate. Make sure we don't end up slightly
2547 	 * lower than one of those, as it would make us fall through
2548 	 * to a much lower baud rate than we really want.
2549 	 */
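	/*
	 * For instance, assuming uartclk = 132 MHz and a boot loader setting
	 * of CD = 72 for 115200 Bd: reporting 132000000 / (16 * 72) = 114583
	 * would be matched to the next lower standard rate, whereas
	 * 132000000 / (16 * 71) = 116197 still rounds down to 115200.
	 */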
2550 	*baud = port->uartclk / (16 * (quot - 1));
2551 }
2552 
2553 static int __init atmel_console_setup(struct console *co, char *options)
2554 {
2555 	int ret;
2556 	struct uart_port *port = &atmel_ports[co->index].uart;
2557 	int baud = 115200;
2558 	int bits = 8;
2559 	int parity = 'n';
2560 	int flow = 'n';
2561 
2562 	if (port->membase == NULL) {
2563 		/* Port not initialized yet - delay setup */
2564 		return -ENODEV;
2565 	}
2566 
2567 	ret = clk_prepare_enable(atmel_ports[co->index].clk);
2568 	if (ret)
2569 		return ret;
2570 
2571 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2572 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2573 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2574 
2575 	if (options)
2576 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2577 	else
2578 		atmel_console_get_options(port, &baud, &parity, &bits);
2579 
2580 	return uart_set_options(port, co, baud, parity, bits, flow);
2581 }
2582 
2583 static struct uart_driver atmel_uart;
2584 
2585 static struct console atmel_console = {
2586 	.name		= ATMEL_DEVICENAME,
2587 	.write		= atmel_console_write,
2588 	.device		= uart_console_device,
2589 	.setup		= atmel_console_setup,
2590 	.flags		= CON_PRINTBUFFER,
2591 	.index		= -1,
2592 	.data		= &atmel_uart,
2593 };
2594 
2595 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2596 
2597 /*
2598  * Early console initialization (before the VM subsystem is initialized).
2599  */
2600 static int __init atmel_console_init(void)
2601 {
2602 	int ret;
2603 	if (atmel_default_console_device) {
2604 		struct atmel_uart_data *pdata =
2605 			dev_get_platdata(&atmel_default_console_device->dev);
2606 		int id = pdata->num;
2607 		struct atmel_uart_port *atmel_port = &atmel_ports[id];
2608 
2609 		atmel_port->backup_imr = 0;
2610 		atmel_port->uart.line = id;
2611 
2612 		add_preferred_console(ATMEL_DEVICENAME, id, NULL);
2613 		ret = atmel_init_port(atmel_port, atmel_default_console_device);
2614 		if (ret)
2615 			return ret;
2616 		register_console(&atmel_console);
2617 	}
2618 
2619 	return 0;
2620 }
2621 
2622 console_initcall(atmel_console_init);
2623 
2624 /*
2625  * Late console initialization.
2626  */
2627 static int __init atmel_late_console_init(void)
2628 {
2629 	if (atmel_default_console_device
2630 	    && !(atmel_console.flags & CON_ENABLED))
2631 		register_console(&atmel_console);
2632 
2633 	return 0;
2634 }
2635 
2636 core_initcall(atmel_late_console_init);
2637 
2638 static inline bool atmel_is_console_port(struct uart_port *port)
2639 {
2640 	return port->cons && port->cons->index == port->line;
2641 }
2642 
2643 #else
2644 #define ATMEL_CONSOLE_DEVICE	NULL
2645 
2646 static inline bool atmel_is_console_port(struct uart_port *port)
2647 {
2648 	return false;
2649 }
2650 #endif
2651 
2652 static struct uart_driver atmel_uart = {
2653 	.owner		= THIS_MODULE,
2654 	.driver_name	= "atmel_serial",
2655 	.dev_name	= ATMEL_DEVICENAME,
2656 	.major		= SERIAL_ATMEL_MAJOR,
2657 	.minor		= MINOR_START,
2658 	.nr		= ATMEL_MAX_UART,
2659 	.cons		= ATMEL_CONSOLE_DEVICE,
2660 };
2661 
2662 #ifdef CONFIG_PM
2663 static bool atmel_serial_clk_will_stop(void)
2664 {
2665 #ifdef CONFIG_ARCH_AT91
2666 	return at91_suspend_entering_slow_clock();
2667 #else
2668 	return false;
2669 #endif
2670 }
2671 
2672 static int atmel_serial_suspend(struct platform_device *pdev,
2673 				pm_message_t state)
2674 {
2675 	struct uart_port *port = platform_get_drvdata(pdev);
2676 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2677 
2678 	if (atmel_is_console_port(port) && console_suspend_enabled) {
2679 		/* Drain the TX shifter */
2680 		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2681 			 ATMEL_US_TXEMPTY))
2682 			cpu_relax();
2683 	}
2684 
2685 	if (atmel_is_console_port(port) && !console_suspend_enabled) {
2686 		/* Cache register values as we won't get a full shutdown/startup
2687 		 * cycle
2688 		 */
2689 		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2690 		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2691 		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2692 		atmel_port->cache.rtor = atmel_uart_readl(port,
2693 							  atmel_port->rtor);
2694 		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2695 		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2696 		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2697 	}
2698 
2699 	/* We cannot wake up if we're running on the slow clock */
2700 	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2701 	if (atmel_serial_clk_will_stop()) {
2702 		unsigned long flags;
2703 
2704 		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2705 		atmel_port->suspended = true;
2706 		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2707 		device_set_wakeup_enable(&pdev->dev, 0);
2708 	}
2709 
2710 	uart_suspend_port(&atmel_uart, port);
2711 
2712 	return 0;
2713 }
2714 
2715 static int atmel_serial_resume(struct platform_device *pdev)
2716 {
2717 	struct uart_port *port = platform_get_drvdata(pdev);
2718 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2719 	unsigned long flags;
2720 
2721 	if (atmel_is_console_port(port) && !console_suspend_enabled) {
2722 		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2723 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2724 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2725 		atmel_uart_writel(port, atmel_port->rtor,
2726 				  atmel_port->cache.rtor);
2727 		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2728 
2729 		if (atmel_port->fifo_size) {
2730 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2731 					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2732 			atmel_uart_writel(port, ATMEL_US_FMR,
2733 					  atmel_port->cache.fmr);
2734 			atmel_uart_writel(port, ATMEL_US_FIER,
2735 					  atmel_port->cache.fimr);
2736 		}
2737 		atmel_start_rx(port);
2738 	}
2739 
2740 	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2741 	if (atmel_port->pending) {
2742 		atmel_handle_receive(port, atmel_port->pending);
2743 		atmel_handle_status(port, atmel_port->pending,
2744 				    atmel_port->pending_status);
2745 		atmel_handle_transmit(port, atmel_port->pending);
2746 		atmel_port->pending = 0;
2747 	}
2748 	atmel_port->suspended = false;
2749 	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2750 
2751 	uart_resume_port(&atmel_uart, port);
2752 	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2753 
2754 	return 0;
2755 }
2756 #else
2757 #define atmel_serial_suspend NULL
2758 #define atmel_serial_resume NULL
2759 #endif
2760 
2761 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2762 				     struct platform_device *pdev)
2763 {
2764 	atmel_port->fifo_size = 0;
2765 	atmel_port->rts_low = 0;
2766 	atmel_port->rts_high = 0;
2767 
2768 	if (of_property_read_u32(pdev->dev.of_node,
2769 				 "atmel,fifo-size",
2770 				 &atmel_port->fifo_size))
2771 		return;
2772 
2773 	if (!atmel_port->fifo_size)
2774 		return;
2775 
2776 	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2777 		atmel_port->fifo_size = 0;
2778 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2779 		return;
2780 	}
2781 
2782 	/*
2783 	 * 0 <= rts_low <= rts_high <= fifo_size
2784 	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
2785 	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
2786 	 * they actually stop sending new data. So we try to set the RTS High
2787 	 * Threshold to a reasonably high value respecting this 16-data
2788 	 * empirical rule when possible.
2789 	 */
2790 	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2791 			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2792 	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2793 			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
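	/*
	 * For example, a 32-data FIFO gives rts_high = max(16, 32 - 16) = 16
	 * and rts_low = max(8, 32 - 20) = 12.
	 */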
2794 
2795 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2796 		 atmel_port->fifo_size);
2797 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2798 		atmel_port->rts_high);
2799 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2800 		atmel_port->rts_low);
2801 }
2802 
2803 static int atmel_serial_probe(struct platform_device *pdev)
2804 {
2805 	struct atmel_uart_port *atmel_port;
2806 	struct device_node *np = pdev->dev.of_node;
2807 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2808 	void *data;
2809 	int ret = -ENODEV;
2810 	bool rs485_enabled;
2811 
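	/*
	 * The RX ring size must be a power of two (checked below) so the
	 * head/tail indices can wrap with a cheap mask operation.
	 */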
2812 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2813 
2814 	if (np)
2815 		ret = of_alias_get_id(np, "serial");
2816 	else
2817 		if (pdata)
2818 			ret = pdata->num;
2819 
2820 	if (ret < 0)
2821 		/* port id not found in platform data or device-tree aliases:
2822 		 * auto-enumerate it */
2823 		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2824 
2825 	if (ret >= ATMEL_MAX_UART) {
2826 		ret = -ENODEV;
2827 		goto err;
2828 	}
2829 
2830 	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2831 		/* port already in use */
2832 		ret = -EBUSY;
2833 		goto err;
2834 	}
2835 
2836 	atmel_port = &atmel_ports[ret];
2837 	atmel_port->backup_imr = 0;
2838 	atmel_port->uart.line = ret;
2839 	atmel_serial_probe_fifos(atmel_port, pdev);
2840 
2841 	atomic_set(&atmel_port->tasklet_shutdown, 0);
2842 	spin_lock_init(&atmel_port->lock_suspended);
2843 
2844 	ret = atmel_init_port(atmel_port, pdev);
2845 	if (ret)
2846 		goto err_clear_bit;
2847 
2848 	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2849 	if (IS_ERR(atmel_port->gpios)) {
2850 		ret = PTR_ERR(atmel_port->gpios);
2851 		goto err_clear_bit;
2852 	}
2853 
2854 	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2855 		ret = -ENOMEM;
2856 		data = kmalloc(sizeof(struct atmel_uart_char)
2857 				* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
2858 		if (!data)
2859 			goto err_alloc_ring;
2860 		atmel_port->rx_ring.buf = data;
2861 	}
2862 
2863 	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2864 
2865 	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2866 	if (ret)
2867 		goto err_add_port;
2868 
2869 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2870 	if (atmel_is_console_port(&atmel_port->uart)
2871 			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2872 		/*
2873 		 * The serial core enabled the clock for us, so undo
2874 		 * the clk_prepare_enable() in atmel_console_setup()
2875 		 */
2876 		clk_disable_unprepare(atmel_port->clk);
2877 	}
2878 #endif
2879 
2880 	device_init_wakeup(&pdev->dev, 1);
2881 	platform_set_drvdata(pdev, atmel_port);
2882 
2883 	/*
2884 	 * The peripheral clock has been disabled by atmel_init_port():
2885 	 * enable it before accessing I/O registers
2886 	 */
2887 	clk_prepare_enable(atmel_port->clk);
2888 
2889 	if (rs485_enabled) {
2890 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2891 				  ATMEL_US_USMODE_NORMAL);
2892 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2893 				  ATMEL_US_RTSEN);
2894 	}
2895 
2896 	/*
2897 	 * Get the port's IP name: usart or uart
2898 	 */
2899 	atmel_get_ip_name(&atmel_port->uart);
2900 
2901 	/*
2902 	 * The peripheral clock can now safely be disabled until the port
2903 	 * is used
2904 	 */
2905 	clk_disable_unprepare(atmel_port->clk);
2906 
2907 	return 0;
2908 
2909 err_add_port:
2910 	kfree(atmel_port->rx_ring.buf);
2911 	atmel_port->rx_ring.buf = NULL;
2912 err_alloc_ring:
2913 	if (!atmel_is_console_port(&atmel_port->uart)) {
2914 		clk_put(atmel_port->clk);
2915 		atmel_port->clk = NULL;
2916 	}
2917 err_clear_bit:
2918 	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2919 err:
2920 	return ret;
2921 }
2922 
2923 /*
2924  * Even if the driver is not modular, it makes sense to be able to
2925  * unbind a device: there can be many bound devices, and there are
2926  * situations where dynamic binding and unbinding can be useful.
2927  *
2928  * For example, a connected device can require a specific firmware update
2929  * protocol that needs bitbanging on IO lines, but use the regular serial
2930  * port in the normal case.
2931  */
2932 static int atmel_serial_remove(struct platform_device *pdev)
2933 {
2934 	struct uart_port *port = platform_get_drvdata(pdev);
2935 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2936 	int ret = 0;
2937 
2938 	tasklet_kill(&atmel_port->tasklet_rx);
2939 	tasklet_kill(&atmel_port->tasklet_tx);
2940 
2941 	device_init_wakeup(&pdev->dev, 0);
2942 
2943 	ret = uart_remove_one_port(&atmel_uart, port);
2944 
2945 	kfree(atmel_port->rx_ring.buf);
2946 
2947 	/* "port" is allocated statically, so we shouldn't free it */
2948 
2949 	clear_bit(port->line, atmel_ports_in_use);
2950 
2951 	clk_put(atmel_port->clk);
2952 	atmel_port->clk = NULL;
2953 
2954 	return ret;
2955 }
2956 
2957 static struct platform_driver atmel_serial_driver = {
2958 	.probe		= atmel_serial_probe,
2959 	.remove		= atmel_serial_remove,
2960 	.suspend	= atmel_serial_suspend,
2961 	.resume		= atmel_serial_resume,
2962 	.driver		= {
2963 		.name			= "atmel_usart",
2964 		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
2965 	},
2966 };
2967 
2968 static int __init atmel_serial_init(void)
2969 {
2970 	int ret;
2971 
2972 	ret = uart_register_driver(&atmel_uart);
2973 	if (ret)
2974 		return ret;
2975 
2976 	ret = platform_driver_register(&atmel_serial_driver);
2977 	if (ret)
2978 		uart_unregister_driver(&atmel_uart);
2979 
2980 	return ret;
2981 }
2982 device_initcall(atmel_serial_init);
2983