xref: /openbmc/linux/drivers/tty/serial/atmel_serial.c (revision 26d0dfbb16fcb17d128a79dc70f3020ea6992af0)
1  // SPDX-License-Identifier: GPL-2.0+
2  /*
3   *  Driver for Atmel AT91 Serial ports
4   *  Copyright (C) 2003 Rick Bronson
5   *
6   *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7   *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8   *
9   *  DMA support added by Chip Coldwell.
10   */
11  #include <linux/circ_buf.h>
12  #include <linux/tty.h>
13  #include <linux/ioport.h>
14  #include <linux/slab.h>
15  #include <linux/init.h>
16  #include <linux/serial.h>
17  #include <linux/clk.h>
18  #include <linux/clk-provider.h>
19  #include <linux/console.h>
20  #include <linux/sysrq.h>
21  #include <linux/tty_flip.h>
22  #include <linux/platform_device.h>
23  #include <linux/of.h>
24  #include <linux/dma-mapping.h>
25  #include <linux/dmaengine.h>
26  #include <linux/atmel_pdc.h>
27  #include <linux/uaccess.h>
28  #include <linux/platform_data/atmel.h>
29  #include <linux/timer.h>
30  #include <linux/err.h>
31  #include <linux/irq.h>
32  #include <linux/suspend.h>
33  #include <linux/mm.h>
34  #include <linux/io.h>
35  
36  #include <asm/div64.h>
37  #include <asm/ioctls.h>
38  
39  #define PDC_BUFFER_SIZE		512
40  /* Revisit: We should calculate this based on the actual port settings */
41  #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
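/*
 * Sizing sketch (informative, assuming the usual 8N1 framing): the receiver
 * time-out counts in bit periods, so (3 * 10) corresponds to roughly 10 bit
 * periods per character (start + 8 data + stop), i.e. the time-out fires
 * after about three idle character times.
 */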
42  
43  /* The minimum number of data the FIFOs should be able to contain */
44  #define ATMEL_MIN_FIFO_SIZE	8
45  /*
46   * These two offsets are subtracted from the RX FIFO size to define the RTS
47   * high and low thresholds
48   */
49  #define ATMEL_RTS_HIGH_OFFSET	16
50  #define ATMEL_RTS_LOW_OFFSET	20
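/*
 * Illustrative sketch (values assumed): elsewhere in the driver the hardware
 * handshake thresholds are derived as (RX FIFO size - offset).  With a
 * 32-byte RX FIFO that would give 32 - 16 = 16 (RTS deasserted, sender
 * paused) and 32 - 20 = 12 (RTS reasserted, sender resumed), leaving some
 * headroom before the FIFO overflows.
 */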
51  
52  #include <linux/serial_core.h>
53  
54  #include "serial_mctrl_gpio.h"
55  #include "atmel_serial.h"
56  
57  static void atmel_start_rx(struct uart_port *port);
58  static void atmel_stop_rx(struct uart_port *port);
59  
60  #ifdef CONFIG_SERIAL_ATMEL_TTYAT
61  
62  /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
63   * are to coexist with the 8250 driver, such as if we have an external 16C550
64   * UART. */
65  #define SERIAL_ATMEL_MAJOR	204
66  #define MINOR_START		154
67  #define ATMEL_DEVICENAME	"ttyAT"
68  
69  #else
70  
71  /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
72   * name, but it is legally reserved for the 8250 driver. */
73  #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
74  #define MINOR_START		64
75  #define ATMEL_DEVICENAME	"ttyS"
76  
77  #endif
78  
79  #define ATMEL_ISR_PASS_LIMIT	256
80  
81  struct atmel_dma_buffer {
82  	unsigned char	*buf;
83  	dma_addr_t	dma_addr;
84  	unsigned int	dma_size;
85  	unsigned int	ofs;
86  };
87  
88  struct atmel_uart_char {
89  	u16		status;
90  	u16		ch;
91  };
92  
93  /*
94   * Be careful, the real size of the ring buffer is
95   * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
96   * can contain up to 1024 characters in PIO mode and up to 4096 characters in
97   * DMA mode.
98   */
99  #define ATMEL_SERIAL_RINGSIZE 1024
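/*
 * Size check (informative): struct atmel_uart_char is two u16 fields, so the
 * ring allocation is 4 * 1024 = 4096 bytes.  In PIO mode it holds 1024
 * (status, ch) entries; in DMA mode the same buffer is reused as a plain
 * byte buffer, hence the 4096-character figure quoted above.
 */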
100  
101  /*
102   * at91: 6 USARTs and one DBGU port (SAM9260)
103   * samx7: 3 USARTs and 5 UARTs
104   */
105  #define ATMEL_MAX_UART		8
106  
107  /*
108   * We wrap our port structure around the generic uart_port.
109   */
110  struct atmel_uart_port {
111  	struct uart_port	uart;		/* uart */
112  	struct clk		*clk;		/* uart clock */
113  	struct clk		*gclk;		/* uart generic clock */
114  	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
115  	u32			backup_imr;	/* IMR saved during suspend */
116  	int			break_active;	/* break being received */
117  
118  	bool			use_dma_rx;	/* enable DMA receiver */
119  	bool			use_pdc_rx;	/* enable PDC receiver */
120  	short			pdc_rx_idx;	/* current PDC RX buffer */
121  	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
122  
123  	bool			use_dma_tx;     /* enable DMA transmitter */
124  	bool			use_pdc_tx;	/* enable PDC transmitter */
125  	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
126  
127  	spinlock_t			lock_tx;	/* port lock */
128  	spinlock_t			lock_rx;	/* port lock */
129  	struct dma_chan			*chan_tx;
130  	struct dma_chan			*chan_rx;
131  	struct dma_async_tx_descriptor	*desc_tx;
132  	struct dma_async_tx_descriptor	*desc_rx;
133  	dma_cookie_t			cookie_tx;
134  	dma_cookie_t			cookie_rx;
135  	struct scatterlist		sg_tx;
136  	struct scatterlist		sg_rx;
137  	struct tasklet_struct	tasklet_rx;
138  	struct tasklet_struct	tasklet_tx;
139  	atomic_t		tasklet_shutdown;
140  	unsigned int		irq_status_prev;
141  	unsigned int		tx_len;
142  
143  	struct circ_buf		rx_ring;
144  
145  	struct mctrl_gpios	*gpios;
146  	u32			backup_mode;	/* MR saved during iso7816 operations */
147  	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
148  	unsigned int		tx_done_mask;
149  	u32			fifo_size;
150  	u32			rts_high;
151  	u32			rts_low;
152  	bool			ms_irq_enabled;
153  	u32			rtor;	/* address of receiver timeout register if it exists */
154  	bool			is_usart;
155  	bool			has_frac_baudrate;
156  	bool			has_hw_timer;
157  	struct timer_list	uart_timer;
158  
159  	bool			tx_stopped;
160  	bool			suspended;
161  	unsigned int		pending;
162  	unsigned int		pending_status;
163  	spinlock_t		lock_suspended;
164  
165  	bool			hd_start_rx;	/* can start RX during half-duplex operation */
166  
167  	/* ISO7816 */
168  	unsigned int		fidi_min;
169  	unsigned int		fidi_max;
170  
171  	struct {
172  		u32		cr;
173  		u32		mr;
174  		u32		imr;
175  		u32		brgr;
176  		u32		rtor;
177  		u32		ttgr;
178  		u32		fmr;
179  		u32		fimr;
180  	} cache;
181  
182  	int (*prepare_rx)(struct uart_port *port);
183  	int (*prepare_tx)(struct uart_port *port);
184  	void (*schedule_rx)(struct uart_port *port);
185  	void (*schedule_tx)(struct uart_port *port);
186  	void (*release_rx)(struct uart_port *port);
187  	void (*release_tx)(struct uart_port *port);
188  };
189  
190  static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
191  static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
192  
193  #if defined(CONFIG_OF)
194  static const struct of_device_id atmel_serial_dt_ids[] = {
195  	{ .compatible = "atmel,at91rm9200-usart-serial" },
196  	{ /* sentinel */ }
197  };
198  #endif
199  
200  static inline struct atmel_uart_port *
201  to_atmel_uart_port(struct uart_port *uart)
202  {
203  	return container_of(uart, struct atmel_uart_port, uart);
204  }
205  
206  static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
207  {
208  	return __raw_readl(port->membase + reg);
209  }
210  
211  static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
212  {
213  	__raw_writel(value, port->membase + reg);
214  }
215  
216  static inline u8 atmel_uart_read_char(struct uart_port *port)
217  {
218  	return __raw_readb(port->membase + ATMEL_US_RHR);
219  }
220  
221  static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
222  {
223  	__raw_writeb(value, port->membase + ATMEL_US_THR);
224  }
225  
226  static inline int atmel_uart_is_half_duplex(struct uart_port *port)
227  {
228  	return ((port->rs485.flags & SER_RS485_ENABLED) &&
229  		!(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
230  		(port->iso7816.flags & SER_ISO7816_ENABLED);
231  }
232  
233  static inline int atmel_error_rate(int desired_value, int actual_value)
234  {
235  	return 100 - (desired_value * 100) / actual_value;
236  }
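
/*
 * Worked example (illustrative values): with a desired baud rate of 115200
 * and an achievable rate of 117187, atmel_error_rate() returns
 * 100 - (115200 * 100) / 117187 = 100 - 98 = 2, i.e. roughly a 2% error
 * (integer arithmetic throughout).
 */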
237  
238  #ifdef CONFIG_SERIAL_ATMEL_PDC
239  static bool atmel_use_pdc_rx(struct uart_port *port)
240  {
241  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
242  
243  	return atmel_port->use_pdc_rx;
244  }
245  
246  static bool atmel_use_pdc_tx(struct uart_port *port)
247  {
248  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
249  
250  	return atmel_port->use_pdc_tx;
251  }
252  #else
253  static bool atmel_use_pdc_rx(struct uart_port *port)
254  {
255  	return false;
256  }
257  
258  static bool atmel_use_pdc_tx(struct uart_port *port)
259  {
260  	return false;
261  }
262  #endif
263  
264  static bool atmel_use_dma_tx(struct uart_port *port)
265  {
266  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
267  
268  	return atmel_port->use_dma_tx;
269  }
270  
271  static bool atmel_use_dma_rx(struct uart_port *port)
272  {
273  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
274  
275  	return atmel_port->use_dma_rx;
276  }
277  
278  static bool atmel_use_fifo(struct uart_port *port)
279  {
280  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
281  
282  	return atmel_port->fifo_size;
283  }
284  
285  static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
286  				   struct tasklet_struct *t)
287  {
288  	if (!atomic_read(&atmel_port->tasklet_shutdown))
289  		tasklet_schedule(t);
290  }
291  
292  /* Enable or disable the rs485 support */
293  static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
294  			      struct serial_rs485 *rs485conf)
295  {
296  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
297  	unsigned int mode;
298  
299  	/* Disable interrupts */
300  	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
301  
302  	mode = atmel_uart_readl(port, ATMEL_US_MR);
303  
304  	if (rs485conf->flags & SER_RS485_ENABLED) {
305  		dev_dbg(port->dev, "Setting UART to RS485\n");
306  		if (rs485conf->flags & SER_RS485_RX_DURING_TX)
307  			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
308  		else
309  			atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
310  
311  		atmel_uart_writel(port, ATMEL_US_TTGR,
312  				  rs485conf->delay_rts_after_send);
313  		mode &= ~ATMEL_US_USMODE;
314  		mode |= ATMEL_US_USMODE_RS485;
315  	} else {
316  		dev_dbg(port->dev, "Setting UART to RS232\n");
317  		if (atmel_use_pdc_tx(port))
318  			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
319  				ATMEL_US_TXBUFE;
320  		else
321  			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
322  	}
323  	atmel_uart_writel(port, ATMEL_US_MR, mode);
324  
325  	/* Enable interrupts */
326  	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
327  
328  	return 0;
329  }
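
/*
 * Note on the tx_done_mask choice above: in half-duplex RS485 (no RX during
 * TX) the driver waits for TXEMPTY, i.e. the shift register has drained and
 * the bus can be turned around safely; TXRDY only means the holding register
 * is free while a character may still be on the wire.
 */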
330  
331  static unsigned int atmel_calc_cd(struct uart_port *port,
332  				  struct serial_iso7816 *iso7816conf)
333  {
334  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
335  	unsigned int cd;
336  	u64 mck_rate;
337  
338  	mck_rate = (u64)clk_get_rate(atmel_port->clk);
339  	do_div(mck_rate, iso7816conf->clk);
340  	cd = mck_rate;
341  	return cd;
342  }
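
/*
 * Worked example (clock values assumed): with a 132 MHz peripheral clock and
 * an ISO7816 card clock of 3.75 MHz, cd = 132000000 / 3750000 = 35, which is
 * the divider written to ATMEL_US_BRGR in atmel_config_iso7816().
 */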
343  
344  static unsigned int atmel_calc_fidi(struct uart_port *port,
345  				    struct serial_iso7816 *iso7816conf)
346  {
347  	u64 fidi = 0;
348  
349  	if (iso7816conf->sc_fi && iso7816conf->sc_di) {
350  		fidi = (u64)iso7816conf->sc_fi;
351  		do_div(fidi, iso7816conf->sc_di);
352  	}
353  	return (u32)fidi;
354  }
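
/*
 * Worked example: the ISO7816 default factors Fi = 372 and Di = 1 give
 * fidi = 372 / 1 = 372 (0x174), which is also the value restored to
 * ATMEL_US_FIDI when ISO7816 mode is disabled below.
 */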
355  
356  /* Enable or disable the iso7816 support */
357  /* Called with interrupts disabled */
358  static int atmel_config_iso7816(struct uart_port *port,
359  				struct serial_iso7816 *iso7816conf)
360  {
361  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
362  	unsigned int mode;
363  	unsigned int cd, fidi;
364  	int ret = 0;
365  
366  	/* Disable interrupts */
367  	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
368  
369  	mode = atmel_uart_readl(port, ATMEL_US_MR);
370  
371  	if (iso7816conf->flags & SER_ISO7816_ENABLED) {
372  		mode &= ~ATMEL_US_USMODE;
373  
374  		if (iso7816conf->tg > 255) {
375  			dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n");
376  			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
377  			ret = -EINVAL;
378  			goto err_out;
379  		}
380  
381  		if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
382  		    == SER_ISO7816_T(0)) {
383  			mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK;
384  		} else if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
385  			   == SER_ISO7816_T(1)) {
386  			mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK;
387  		} else {
388  			dev_err(port->dev, "ISO7816: Type not supported\n");
389  			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
390  			ret = -EINVAL;
391  			goto err_out;
392  		}
393  
394  		mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR);
395  
396  		/* select the MCK clock and enable the clock output */
397  		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
398  		/* set parity for normal/inverse mode + max iterations */
399  		mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3);
400  
401  		cd = atmel_calc_cd(port, iso7816conf);
402  		fidi = atmel_calc_fidi(port, iso7816conf);
403  		if (fidi == 0) {
404  			dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n");
405  		} else if (fidi < atmel_port->fidi_min
406  			   || fidi > atmel_port->fidi_max) {
407  			dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi);
408  			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
409  			ret = -EINVAL;
410  			goto err_out;
411  		}
412  
413  		if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) {
414  			/* port not yet in iso7816 mode: store configuration */
415  			atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR);
416  			atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
417  		}
418  
419  		atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg);
420  		atmel_uart_writel(port, ATMEL_US_BRGR, cd);
421  		atmel_uart_writel(port, ATMEL_US_FIDI, fidi);
422  
423  		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN);
424  		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION;
425  	} else {
426  		dev_dbg(port->dev, "Setting UART back to RS232\n");
427  		/* back to last RS232 settings */
428  		mode = atmel_port->backup_mode;
429  		memset(iso7816conf, 0, sizeof(struct serial_iso7816));
430  		atmel_uart_writel(port, ATMEL_US_TTGR, 0);
431  		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr);
432  		atmel_uart_writel(port, ATMEL_US_FIDI, 0x174);
433  
434  		if (atmel_use_pdc_tx(port))
435  			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
436  						   ATMEL_US_TXBUFE;
437  		else
438  			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
439  	}
440  
441  	port->iso7816 = *iso7816conf;
442  
443  	atmel_uart_writel(port, ATMEL_US_MR, mode);
444  
445  err_out:
446  	/* Enable interrupts */
447  	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
448  
449  	return ret;
450  }
451  
452  /*
453   * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
454   */
455  static u_int atmel_tx_empty(struct uart_port *port)
456  {
457  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
458  
459  	if (atmel_port->tx_stopped)
460  		return TIOCSER_TEMT;
461  	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
462  		TIOCSER_TEMT :
463  		0;
464  }
465  
466  /*
467   * Set state of the modem control output lines
468   */
469  static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
470  {
471  	unsigned int control = 0;
472  	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
473  	unsigned int rts_paused, rts_ready;
474  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
475  
476  	/* override mode to RS485 if needed, otherwise keep the current mode */
477  	if (port->rs485.flags & SER_RS485_ENABLED) {
478  		atmel_uart_writel(port, ATMEL_US_TTGR,
479  				  port->rs485.delay_rts_after_send);
480  		mode &= ~ATMEL_US_USMODE;
481  		mode |= ATMEL_US_USMODE_RS485;
482  	}
483  
484  	/* set the RTS line state according to the mode */
485  	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
486  		/* force RTS line to high level */
487  		rts_paused = ATMEL_US_RTSEN;
488  
489  		/* give the control of the RTS line back to the hardware */
490  		rts_ready = ATMEL_US_RTSDIS;
491  	} else {
492  		/* force RTS line to high level */
493  		rts_paused = ATMEL_US_RTSDIS;
494  
495  		/* force RTS line to low level */
496  		rts_ready = ATMEL_US_RTSEN;
497  	}
498  
499  	if (mctrl & TIOCM_RTS)
500  		control |= rts_ready;
501  	else
502  		control |= rts_paused;
503  
504  	if (mctrl & TIOCM_DTR)
505  		control |= ATMEL_US_DTREN;
506  	else
507  		control |= ATMEL_US_DTRDIS;
508  
509  	atmel_uart_writel(port, ATMEL_US_CR, control);
510  
511  	mctrl_gpio_set(atmel_port->gpios, mctrl);
512  
513  	/* Local loopback mode? */
514  	mode &= ~ATMEL_US_CHMODE;
515  	if (mctrl & TIOCM_LOOP)
516  		mode |= ATMEL_US_CHMODE_LOC_LOOP;
517  	else
518  		mode |= ATMEL_US_CHMODE_NORMAL;
519  
520  	atmel_uart_writel(port, ATMEL_US_MR, mode);
521  }
522  
523  /*
524   * Get state of the modem control input lines
525   */
526  static u_int atmel_get_mctrl(struct uart_port *port)
527  {
528  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
529  	unsigned int ret = 0, status;
530  
531  	status = atmel_uart_readl(port, ATMEL_US_CSR);
532  
533  	/*
534  	 * The control signals are active low.
535  	 */
536  	if (!(status & ATMEL_US_DCD))
537  		ret |= TIOCM_CD;
538  	if (!(status & ATMEL_US_CTS))
539  		ret |= TIOCM_CTS;
540  	if (!(status & ATMEL_US_DSR))
541  		ret |= TIOCM_DSR;
542  	if (!(status & ATMEL_US_RI))
543  		ret |= TIOCM_RI;
544  
545  	return mctrl_gpio_get(atmel_port->gpios, &ret);
546  }
547  
548  /*
549   * Stop transmitting.
550   */
551  static void atmel_stop_tx(struct uart_port *port)
552  {
553  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
554  	bool is_pdc = atmel_use_pdc_tx(port);
555  	bool is_dma = is_pdc || atmel_use_dma_tx(port);
556  
557  	if (is_pdc) {
558  		/* disable PDC transmit */
559  		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
560  	}
561  
562  	if (is_dma) {
563  		/*
564  		 * Disable the transmitter.
565  		 * This is mandatory when DMA is used, otherwise the DMA buffer
566  		 * would still be transmitted in full.
567  		 */
568  		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
569  		atmel_port->tx_stopped = true;
570  	}
571  
572  	/* Disable interrupts */
573  	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
574  
575  	if (atmel_uart_is_half_duplex(port))
576  		if (!atomic_read(&atmel_port->tasklet_shutdown))
577  			atmel_start_rx(port);
578  }
579  
580  /*
581   * Start transmitting.
582   */
583  static void atmel_start_tx(struct uart_port *port)
584  {
585  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
586  	bool is_pdc = atmel_use_pdc_tx(port);
587  	bool is_dma = is_pdc || atmel_use_dma_tx(port);
588  
589  	if (is_pdc && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
590  				       & ATMEL_PDC_TXTEN))
591  		/* The transmitter is already running.  Yes, we
592  		   really need this.*/
593  		return;
594  
595  	if (is_dma && atmel_uart_is_half_duplex(port))
596  		atmel_stop_rx(port);
597  
598  	if (is_pdc) {
599  		/* re-enable PDC transmit */
600  		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
601  	}
602  
603  	/* Enable interrupts */
604  	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
605  
606  	if (is_dma) {
607  		/* re-enable the transmitter */
608  		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
609  		atmel_port->tx_stopped = false;
610  	}
611  }
612  
613  /*
614   * start receiving - port is in process of being opened.
615   */
616  static void atmel_start_rx(struct uart_port *port)
617  {
618  	/* reset status and receiver */
619  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
620  
621  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
622  
623  	if (atmel_use_pdc_rx(port)) {
624  		/* enable PDC controller */
625  		atmel_uart_writel(port, ATMEL_US_IER,
626  				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
627  				  port->read_status_mask);
628  		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
629  	} else {
630  		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
631  	}
632  }
633  
634  /*
635   * Stop receiving - port is in process of being closed.
636   */
637  static void atmel_stop_rx(struct uart_port *port)
638  {
639  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
640  
641  	if (atmel_use_pdc_rx(port)) {
642  		/* disable PDC receive */
643  		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
644  		atmel_uart_writel(port, ATMEL_US_IDR,
645  				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
646  				  port->read_status_mask);
647  	} else {
648  		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
649  	}
650  }
651  
652  /*
653   * Enable modem status interrupts
654   */
655  static void atmel_enable_ms(struct uart_port *port)
656  {
657  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
658  	uint32_t ier = 0;
659  
660  	/*
661  	 * Interrupt should not be enabled twice
662  	 */
663  	if (atmel_port->ms_irq_enabled)
664  		return;
665  
666  	atmel_port->ms_irq_enabled = true;
667  
668  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
669  		ier |= ATMEL_US_CTSIC;
670  
671  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
672  		ier |= ATMEL_US_DSRIC;
673  
674  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
675  		ier |= ATMEL_US_RIIC;
676  
677  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
678  		ier |= ATMEL_US_DCDIC;
679  
680  	atmel_uart_writel(port, ATMEL_US_IER, ier);
681  
682  	mctrl_gpio_enable_ms(atmel_port->gpios);
683  }
684  
685  /*
686   * Disable modem status interrupts
687   */
688  static void atmel_disable_ms(struct uart_port *port)
689  {
690  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
691  	uint32_t idr = 0;
692  
693  	/*
694  	 * Interrupt should not be disabled twice
695  	 */
696  	if (!atmel_port->ms_irq_enabled)
697  		return;
698  
699  	atmel_port->ms_irq_enabled = false;
700  
701  	mctrl_gpio_disable_ms(atmel_port->gpios);
702  
703  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
704  		idr |= ATMEL_US_CTSIC;
705  
706  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
707  		idr |= ATMEL_US_DSRIC;
708  
709  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
710  		idr |= ATMEL_US_RIIC;
711  
712  	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
713  		idr |= ATMEL_US_DCDIC;
714  
715  	atmel_uart_writel(port, ATMEL_US_IDR, idr);
716  }
717  
718  /*
719   * Control the transmission of a break signal
720   */
721  static void atmel_break_ctl(struct uart_port *port, int break_state)
722  {
723  	if (break_state != 0)
724  		/* start break */
725  		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
726  	else
727  		/* stop break */
728  		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
729  }
730  
731  /*
732   * Stores the incoming character in the ring buffer
733   */
734  static void
735  atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
736  		     unsigned int ch)
737  {
738  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
739  	struct circ_buf *ring = &atmel_port->rx_ring;
740  	struct atmel_uart_char *c;
741  
742  	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
743  		/* Buffer overflow, ignore char */
744  		return;
745  
746  	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
747  	c->status	= status;
748  	c->ch		= ch;
749  
750  	/* Make sure the character is stored before we update head. */
751  	smp_wmb();
752  
753  	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
754  }
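
/*
 * The smp_wmb() above pairs with the smp_rmb() in atmel_rx_from_ring(): the
 * producer publishes the character before advancing head, and the consumer
 * loads the character only after reading head, so no ring entry is consumed
 * before it has been fully written.
 */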
755  
756  /*
757   * Deal with parity, framing and overrun errors.
758   */
759  static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
760  {
761  	/* clear error */
762  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
763  
764  	if (status & ATMEL_US_RXBRK) {
765  		/* ignore side-effect */
766  		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
767  		port->icount.brk++;
768  	}
769  	if (status & ATMEL_US_PARE)
770  		port->icount.parity++;
771  	if (status & ATMEL_US_FRAME)
772  		port->icount.frame++;
773  	if (status & ATMEL_US_OVRE)
774  		port->icount.overrun++;
775  }
776  
777  /*
778   * Characters received (called from interrupt handler)
779   */
780  static void atmel_rx_chars(struct uart_port *port)
781  {
782  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
783  	unsigned int status, ch;
784  
785  	status = atmel_uart_readl(port, ATMEL_US_CSR);
786  	while (status & ATMEL_US_RXRDY) {
787  		ch = atmel_uart_read_char(port);
788  
789  		/*
790  		 * note that the error handling code is
791  		 * out of the main execution path
792  		 */
793  		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
794  				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
795  			     || atmel_port->break_active)) {
796  
797  			/* clear error */
798  			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
799  
800  			if (status & ATMEL_US_RXBRK
801  			    && !atmel_port->break_active) {
802  				atmel_port->break_active = 1;
803  				atmel_uart_writel(port, ATMEL_US_IER,
804  						  ATMEL_US_RXBRK);
805  			} else {
806  				/*
807  				 * This is either the end-of-break
808  				 * condition or we've received at
809  				 * least one character without RXBRK
810  				 * being set. In both cases, the next
811  				 * RXBRK will indicate start-of-break.
812  				 */
813  				atmel_uart_writel(port, ATMEL_US_IDR,
814  						  ATMEL_US_RXBRK);
815  				status &= ~ATMEL_US_RXBRK;
816  				atmel_port->break_active = 0;
817  			}
818  		}
819  
820  		atmel_buffer_rx_char(port, status, ch);
821  		status = atmel_uart_readl(port, ATMEL_US_CSR);
822  	}
823  
824  	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
825  }
826  
827  /*
828   * Transmit characters (called from tasklet with TXRDY interrupt
829   * disabled)
830   */
831  static void atmel_tx_chars(struct uart_port *port)
832  {
833  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
834  	bool pending;
835  	u8 ch;
836  
837  	pending = uart_port_tx(port, ch,
838  		atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY,
839  		atmel_uart_write_char(port, ch));
840  	if (pending) {
841  		/* we still have characters to transmit, so we should continue
842  		 * transmitting them when TX is ready, regardless of
843  		 * mode or duplexity
844  		 */
845  		atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
846  
847  		/* Enable interrupts */
848  		atmel_uart_writel(port, ATMEL_US_IER,
849  				  atmel_port->tx_done_mask);
850  	} else {
851  		if (atmel_uart_is_half_duplex(port))
852  			atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
853  	}
854  }
855  
856  static void atmel_complete_tx_dma(void *arg)
857  {
858  	struct atmel_uart_port *atmel_port = arg;
859  	struct uart_port *port = &atmel_port->uart;
860  	struct circ_buf *xmit = &port->state->xmit;
861  	struct dma_chan *chan = atmel_port->chan_tx;
862  	unsigned long flags;
863  
864  	spin_lock_irqsave(&port->lock, flags);
865  
866  	if (chan)
867  		dmaengine_terminate_all(chan);
868  	uart_xmit_advance(port, atmel_port->tx_len);
869  
870  	spin_lock(&atmel_port->lock_tx);
871  	async_tx_ack(atmel_port->desc_tx);
872  	atmel_port->cookie_tx = -EINVAL;
873  	atmel_port->desc_tx = NULL;
874  	spin_unlock(&atmel_port->lock_tx);
875  
876  	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
877  		uart_write_wakeup(port);
878  
879  	/*
880  	 * xmit is a circular buffer so, if we have just sent data from
881  	 * xmit->tail to the end of xmit->buf, now we have to transmit the
882  	 * remaining data from the beginning of xmit->buf to xmit->head.
883  	 */
884  	if (!uart_circ_empty(xmit))
885  		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
886  	else if (atmel_uart_is_half_duplex(port)) {
887  		/*
888  		 * DMA done, re-enable TXEMPTY and signal that we can stop
889  		 * TX and start RX for RS485
890  		 */
891  		atmel_port->hd_start_rx = true;
892  		atmel_uart_writel(port, ATMEL_US_IER,
893  				  atmel_port->tx_done_mask);
894  	}
895  
896  	spin_unlock_irqrestore(&port->lock, flags);
897  }
898  
899  static void atmel_release_tx_dma(struct uart_port *port)
900  {
901  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
902  	struct dma_chan *chan = atmel_port->chan_tx;
903  
904  	if (chan) {
905  		dmaengine_terminate_all(chan);
906  		dma_release_channel(chan);
907  		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
908  				DMA_TO_DEVICE);
909  	}
910  
911  	atmel_port->desc_tx = NULL;
912  	atmel_port->chan_tx = NULL;
913  	atmel_port->cookie_tx = -EINVAL;
914  }
915  
916  /*
917   * Called from tasklet with the TXRDY interrupt disabled.
918   */
919  static void atmel_tx_dma(struct uart_port *port)
920  {
921  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
922  	struct circ_buf *xmit = &port->state->xmit;
923  	struct dma_chan *chan = atmel_port->chan_tx;
924  	struct dma_async_tx_descriptor *desc;
925  	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
926  	unsigned int tx_len, part1_len, part2_len, sg_len;
927  	dma_addr_t phys_addr;
928  
929  	/* Make sure we have an idle channel */
930  	if (atmel_port->desc_tx != NULL)
931  		return;
932  
933  	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
934  		/*
935  		 * DMA is idle now.
936  		 * Port xmit buffer is already mapped,
937  		 * and it is one page... Just adjust
938  		 * offsets and lengths. Since it is a circular buffer,
939  		 * we have to transmit till the end, and then the rest.
940  		 * Take the port lock to get a
941  		 * consistent xmit buffer state.
942  		 */
943  		tx_len = CIRC_CNT_TO_END(xmit->head,
944  					 xmit->tail,
945  					 UART_XMIT_SIZE);
946  
947  		if (atmel_port->fifo_size) {
948  			/* multi data mode */
949  			part1_len = (tx_len & ~0x3); /* DWORD access */
950  			part2_len = (tx_len & 0x3); /* BYTE access */
951  		} else {
952  			/* single data (legacy) mode */
953  			part1_len = 0;
954  			part2_len = tx_len; /* BYTE access only */
955  		}
956  
957  		sg_init_table(sgl, 2);
958  		sg_len = 0;
959  		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
960  		if (part1_len) {
961  			sg = &sgl[sg_len++];
962  			sg_dma_address(sg) = phys_addr;
963  			sg_dma_len(sg) = part1_len;
964  
965  			phys_addr += part1_len;
966  		}
967  
968  		if (part2_len) {
969  			sg = &sgl[sg_len++];
970  			sg_dma_address(sg) = phys_addr;
971  			sg_dma_len(sg) = part2_len;
972  		}
973  
974  		/*
975  		 * save tx_len so atmel_complete_tx_dma() will increase
976  		 * xmit->tail correctly
977  		 */
978  		atmel_port->tx_len = tx_len;
979  
980  		desc = dmaengine_prep_slave_sg(chan,
981  					       sgl,
982  					       sg_len,
983  					       DMA_MEM_TO_DEV,
984  					       DMA_PREP_INTERRUPT |
985  					       DMA_CTRL_ACK);
986  		if (!desc) {
987  			dev_err(port->dev, "Failed to send via dma!\n");
988  			return;
989  		}
990  
991  		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
992  
993  		atmel_port->desc_tx = desc;
994  		desc->callback = atmel_complete_tx_dma;
995  		desc->callback_param = atmel_port;
996  		atmel_port->cookie_tx = dmaengine_submit(desc);
997  		if (dma_submit_error(atmel_port->cookie_tx)) {
998  			dev_err(port->dev, "dma_submit_error %d\n",
999  				atmel_port->cookie_tx);
1000  			return;
1001  		}
1002  
1003  		dma_async_issue_pending(chan);
1004  	}
1005  
1006  	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1007  		uart_write_wakeup(port);
1008  }
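
/*
 * Sketch of the part1/part2 split above (FIFO-equipped ports): for
 * tx_len = 13 bytes, part1_len = 12 is pushed with 32-bit writes (three
 * DWORDs) and part2_len = 1 with a trailing byte write, so the transfer
 * never pushes more data than is actually queued in xmit.
 */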
1009  
1010  static int atmel_prepare_tx_dma(struct uart_port *port)
1011  {
1012  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1013  	struct device *mfd_dev = port->dev->parent;
1014  	dma_cap_mask_t		mask;
1015  	struct dma_slave_config config;
1016  	int ret, nent;
1017  
1018  	dma_cap_zero(mask);
1019  	dma_cap_set(DMA_SLAVE, mask);
1020  
1021  	atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
1022  	if (atmel_port->chan_tx == NULL)
1023  		goto chan_err;
1024  	dev_info(port->dev, "using %s for tx DMA transfers\n",
1025  		dma_chan_name(atmel_port->chan_tx));
1026  
1027  	spin_lock_init(&atmel_port->lock_tx);
1028  	sg_init_table(&atmel_port->sg_tx, 1);
1029  	/* UART circular tx buffer is an aligned page. */
1030  	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
1031  	sg_set_page(&atmel_port->sg_tx,
1032  			virt_to_page(port->state->xmit.buf),
1033  			UART_XMIT_SIZE,
1034  			offset_in_page(port->state->xmit.buf));
1035  	nent = dma_map_sg(port->dev,
1036  				&atmel_port->sg_tx,
1037  				1,
1038  				DMA_TO_DEVICE);
1039  
1040  	if (!nent) {
1041  		dev_dbg(port->dev, "need to release resource of dma\n");
1042  		goto chan_err;
1043  	} else {
1044  		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1045  			sg_dma_len(&atmel_port->sg_tx),
1046  			port->state->xmit.buf,
1047  			&sg_dma_address(&atmel_port->sg_tx));
1048  	}
1049  
1050  	/* Configure the slave DMA */
1051  	memset(&config, 0, sizeof(config));
1052  	config.direction = DMA_MEM_TO_DEV;
1053  	config.dst_addr_width = (atmel_port->fifo_size) ?
1054  				DMA_SLAVE_BUSWIDTH_4_BYTES :
1055  				DMA_SLAVE_BUSWIDTH_1_BYTE;
1056  	config.dst_addr = port->mapbase + ATMEL_US_THR;
1057  	config.dst_maxburst = 1;
1058  
1059  	ret = dmaengine_slave_config(atmel_port->chan_tx,
1060  				     &config);
1061  	if (ret) {
1062  		dev_err(port->dev, "DMA tx slave configuration failed\n");
1063  		goto chan_err;
1064  	}
1065  
1066  	return 0;
1067  
1068  chan_err:
1069  	dev_err(port->dev, "TX channel not available, switch to pio\n");
1070  	atmel_port->use_dma_tx = false;
1071  	if (atmel_port->chan_tx)
1072  		atmel_release_tx_dma(port);
1073  	return -EINVAL;
1074  }
1075  
1076  static void atmel_complete_rx_dma(void *arg)
1077  {
1078  	struct uart_port *port = arg;
1079  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1080  
1081  	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1082  }
1083  
1084  static void atmel_release_rx_dma(struct uart_port *port)
1085  {
1086  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1087  	struct dma_chan *chan = atmel_port->chan_rx;
1088  
1089  	if (chan) {
1090  		dmaengine_terminate_all(chan);
1091  		dma_release_channel(chan);
1092  		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
1093  				DMA_FROM_DEVICE);
1094  	}
1095  
1096  	atmel_port->desc_rx = NULL;
1097  	atmel_port->chan_rx = NULL;
1098  	atmel_port->cookie_rx = -EINVAL;
1099  }
1100  
1101  static void atmel_rx_from_dma(struct uart_port *port)
1102  {
1103  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1104  	struct tty_port *tport = &port->state->port;
1105  	struct circ_buf *ring = &atmel_port->rx_ring;
1106  	struct dma_chan *chan = atmel_port->chan_rx;
1107  	struct dma_tx_state state;
1108  	enum dma_status dmastat;
1109  	size_t count;
1110  
1111  
1112  	/* Reset the UART timeout early so that we don't miss one */
1113  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1114  	dmastat = dmaengine_tx_status(chan,
1115  				atmel_port->cookie_rx,
1116  				&state);
1117  	/* Restart a new tasklet if DMA status is error */
1118  	if (dmastat == DMA_ERROR) {
1119  		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1120  		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1121  		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1122  		return;
1123  	}
1124  
1125  	/* CPU claims ownership of RX DMA buffer */
1126  	dma_sync_sg_for_cpu(port->dev,
1127  			    &atmel_port->sg_rx,
1128  			    1,
1129  			    DMA_FROM_DEVICE);
1130  
1131  	/*
1132  	 * ring->head points to the end of data already written by the DMA.
1133  	 * ring->tail points to the beginning of data to be read by the
1134  	 * framework.
1135  	 * The current transfer size should not be larger than the dma buffer
1136  	 * length.
1137  	 */
1138  	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1139  	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1140  	/*
1141  	 * At this point ring->head may point to the first byte right after the
1142  	 * last byte of the dma buffer:
1143  	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1144  	 *
1145  	 * However ring->tail must always point inside the dma buffer:
1146  	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1147  	 *
1148  	 * Since we use a ring buffer, we have to handle the case
1149  	 * where head is lower than tail. In such a case, we first read from
1150  	 * tail to the end of the buffer then reset tail.
1151  	 */
1152  	if (ring->head < ring->tail) {
1153  		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1154  
1155  		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1156  		ring->tail = 0;
1157  		port->icount.rx += count;
1158  	}
1159  
1160  	/* Finally we read data from tail to head */
1161  	if (ring->tail < ring->head) {
1162  		count = ring->head - ring->tail;
1163  
1164  		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1165  		/* Wrap ring->head if needed */
1166  		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1167  			ring->head = 0;
1168  		ring->tail = ring->head;
1169  		port->icount.rx += count;
1170  	}
1171  
1172  	/* USART retrieves ownership of RX DMA buffer */
1173  	dma_sync_sg_for_device(port->dev,
1174  			       &atmel_port->sg_rx,
1175  			       1,
1176  			       DMA_FROM_DEVICE);
1177  
1178  	tty_flip_buffer_push(tport);
1179  
1180  	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1181  }
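
/*
 * Residue arithmetic sketch: with a 4096-byte cyclic buffer and
 * state.residue = 1000, ring->head = 4096 - 1000 = 3096, so everything
 * between ring->tail and 3096 is handed to the tty layer (wrapping through
 * the end of the buffer first whenever head < tail).
 */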
1182  
1183  static int atmel_prepare_rx_dma(struct uart_port *port)
1184  {
1185  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1186  	struct device *mfd_dev = port->dev->parent;
1187  	struct dma_async_tx_descriptor *desc;
1188  	dma_cap_mask_t		mask;
1189  	struct dma_slave_config config;
1190  	struct circ_buf		*ring;
1191  	int ret, nent;
1192  
1193  	ring = &atmel_port->rx_ring;
1194  
1195  	dma_cap_zero(mask);
1196  	dma_cap_set(DMA_CYCLIC, mask);
1197  
1198  	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
1199  	if (atmel_port->chan_rx == NULL)
1200  		goto chan_err;
1201  	dev_info(port->dev, "using %s for rx DMA transfers\n",
1202  		dma_chan_name(atmel_port->chan_rx));
1203  
1204  	spin_lock_init(&atmel_port->lock_rx);
1205  	sg_init_table(&atmel_port->sg_rx, 1);
1206  	/* UART circular rx buffer is an aligned page. */
1207  	BUG_ON(!PAGE_ALIGNED(ring->buf));
1208  	sg_set_page(&atmel_port->sg_rx,
1209  		    virt_to_page(ring->buf),
1210  		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1211  		    offset_in_page(ring->buf));
1212  	nent = dma_map_sg(port->dev,
1213  			  &atmel_port->sg_rx,
1214  			  1,
1215  			  DMA_FROM_DEVICE);
1216  
1217  	if (!nent) {
1218  		dev_dbg(port->dev, "need to release resource of dma\n");
1219  		goto chan_err;
1220  	} else {
1221  		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1222  			sg_dma_len(&atmel_port->sg_rx),
1223  			ring->buf,
1224  			&sg_dma_address(&atmel_port->sg_rx));
1225  	}
1226  
1227  	/* Configure the slave DMA */
1228  	memset(&config, 0, sizeof(config));
1229  	config.direction = DMA_DEV_TO_MEM;
1230  	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1231  	config.src_addr = port->mapbase + ATMEL_US_RHR;
1232  	config.src_maxburst = 1;
1233  
1234  	ret = dmaengine_slave_config(atmel_port->chan_rx,
1235  				     &config);
1236  	if (ret) {
1237  		dev_err(port->dev, "DMA rx slave configuration failed\n");
1238  		goto chan_err;
1239  	}
1240  	/*
1241  	 * Prepare a cyclic dma transfer, assign 2 descriptors,
1242  	 * each one covering half the ring buffer size
1243  	 */
1244  	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1245  					 sg_dma_address(&atmel_port->sg_rx),
1246  					 sg_dma_len(&atmel_port->sg_rx),
1247  					 sg_dma_len(&atmel_port->sg_rx)/2,
1248  					 DMA_DEV_TO_MEM,
1249  					 DMA_PREP_INTERRUPT);
1250  	if (!desc) {
1251  		dev_err(port->dev, "Preparing DMA cyclic failed\n");
1252  		goto chan_err;
1253  	}
1254  	desc->callback = atmel_complete_rx_dma;
1255  	desc->callback_param = port;
1256  	atmel_port->desc_rx = desc;
1257  	atmel_port->cookie_rx = dmaengine_submit(desc);
1258  	if (dma_submit_error(atmel_port->cookie_rx)) {
1259  		dev_err(port->dev, "dma_submit_error %d\n",
1260  			atmel_port->cookie_rx);
1261  		goto chan_err;
1262  	}
1263  
1264  	dma_async_issue_pending(atmel_port->chan_rx);
1265  
1266  	return 0;
1267  
1268  chan_err:
1269  	dev_err(port->dev, "RX channel not available, switch to pio\n");
1270  	atmel_port->use_dma_rx = false;
1271  	if (atmel_port->chan_rx)
1272  		atmel_release_rx_dma(port);
1273  	return -EINVAL;
1274  }
1275  
1276  static void atmel_uart_timer_callback(struct timer_list *t)
1277  {
1278  	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1279  							uart_timer);
1280  	struct uart_port *port = &atmel_port->uart;
1281  
1282  	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1283  		tasklet_schedule(&atmel_port->tasklet_rx);
1284  		mod_timer(&atmel_port->uart_timer,
1285  			  jiffies + uart_poll_timeout(port));
1286  	}
1287  }
1288  
1289  /*
1290   * receive interrupt handler.
1291   */
1292  static void
1293  atmel_handle_receive(struct uart_port *port, unsigned int pending)
1294  {
1295  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1296  
1297  	if (atmel_use_pdc_rx(port)) {
1298  		/*
1299  		 * PDC receive. Just schedule the tasklet and let it
1300  		 * figure out the details.
1301  		 *
1302  		 * TODO: We're not handling error flags correctly at
1303  		 * the moment.
1304  		 */
1305  		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1306  			atmel_uart_writel(port, ATMEL_US_IDR,
1307  					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1308  			atmel_tasklet_schedule(atmel_port,
1309  					       &atmel_port->tasklet_rx);
1310  		}
1311  
1312  		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1313  				ATMEL_US_FRAME | ATMEL_US_PARE))
1314  			atmel_pdc_rxerr(port, pending);
1315  	}
1316  
1317  	if (atmel_use_dma_rx(port)) {
1318  		if (pending & ATMEL_US_TIMEOUT) {
1319  			atmel_uart_writel(port, ATMEL_US_IDR,
1320  					  ATMEL_US_TIMEOUT);
1321  			atmel_tasklet_schedule(atmel_port,
1322  					       &atmel_port->tasklet_rx);
1323  		}
1324  	}
1325  
1326  	/* Interrupt receive */
1327  	if (pending & ATMEL_US_RXRDY)
1328  		atmel_rx_chars(port);
1329  	else if (pending & ATMEL_US_RXBRK) {
1330  		/*
1331  		 * End of break detected. If it came along with a
1332  		 * character, atmel_rx_chars will handle it.
1333  		 */
1334  		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1335  		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1336  		atmel_port->break_active = 0;
1337  	}
1338  }
1339  
1340  /*
1341   * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1342   */
1343  static void
1344  atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1345  {
1346  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1347  
1348  	if (pending & atmel_port->tx_done_mask) {
1349  		atmel_uart_writel(port, ATMEL_US_IDR,
1350  				  atmel_port->tx_done_mask);
1351  
1352  		/* Start RX if flag was set and FIFO is empty */
1353  		if (atmel_port->hd_start_rx) {
1354  			if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1355  					& ATMEL_US_TXEMPTY))
1356  				dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1357  
1358  			atmel_port->hd_start_rx = false;
1359  			atmel_start_rx(port);
1360  		}
1361  
1362  		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1363  	}
1364  }
1365  
1366  /*
1367   * status flags interrupt handler.
1368   */
1369  static void
1370  atmel_handle_status(struct uart_port *port, unsigned int pending,
1371  		    unsigned int status)
1372  {
1373  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1374  	unsigned int status_change;
1375  
1376  	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1377  				| ATMEL_US_CTSIC)) {
1378  		status_change = status ^ atmel_port->irq_status_prev;
1379  		atmel_port->irq_status_prev = status;
1380  
1381  		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1382  					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1383  			/* TODO: All reads to CSR will clear these interrupts! */
1384  			if (status_change & ATMEL_US_RI)
1385  				port->icount.rng++;
1386  			if (status_change & ATMEL_US_DSR)
1387  				port->icount.dsr++;
1388  			if (status_change & ATMEL_US_DCD)
1389  				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1390  			if (status_change & ATMEL_US_CTS)
1391  				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1392  
1393  			wake_up_interruptible(&port->state->port.delta_msr_wait);
1394  		}
1395  	}
1396  
1397  	if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION))
1398  		dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending);
1399  }
1400  
1401  /*
1402   * Interrupt handler
1403   */
1404  static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1405  {
1406  	struct uart_port *port = dev_id;
1407  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1408  	unsigned int status, pending, mask, pass_counter = 0;
1409  
1410  	spin_lock(&atmel_port->lock_suspended);
1411  
1412  	do {
1413  		status = atmel_uart_readl(port, ATMEL_US_CSR);
1414  		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1415  		pending = status & mask;
1416  		if (!pending)
1417  			break;
1418  
1419  		if (atmel_port->suspended) {
1420  			atmel_port->pending |= pending;
1421  			atmel_port->pending_status = status;
1422  			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1423  			pm_system_wakeup();
1424  			break;
1425  		}
1426  
1427  		atmel_handle_receive(port, pending);
1428  		atmel_handle_status(port, pending, status);
1429  		atmel_handle_transmit(port, pending);
1430  	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1431  
1432  	spin_unlock(&atmel_port->lock_suspended);
1433  
1434  	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1435  }
1436  
1437  static void atmel_release_tx_pdc(struct uart_port *port)
1438  {
1439  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1440  	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1441  
1442  	dma_unmap_single(port->dev,
1443  			 pdc->dma_addr,
1444  			 pdc->dma_size,
1445  			 DMA_TO_DEVICE);
1446  }
1447  
1448  /*
1449   * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1450   */
1451  static void atmel_tx_pdc(struct uart_port *port)
1452  {
1453  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1454  	struct circ_buf *xmit = &port->state->xmit;
1455  	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1456  	int count;
1457  
1458  	/* nothing left to transmit? */
1459  	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1460  		return;
1461  	uart_xmit_advance(port, pdc->ofs);
1462  	pdc->ofs = 0;
1463  
1464  	/* more to transmit - setup next transfer */
1465  
1466  	/* disable PDC transmit */
1467  	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1468  
1469  	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1470  		dma_sync_single_for_device(port->dev,
1471  					   pdc->dma_addr,
1472  					   pdc->dma_size,
1473  					   DMA_TO_DEVICE);
1474  
1475  		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1476  		pdc->ofs = count;
1477  
1478  		atmel_uart_writel(port, ATMEL_PDC_TPR,
1479  				  pdc->dma_addr + xmit->tail);
1480  		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1481  		/* re-enable PDC transmit */
1482  		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1483  		/* Enable interrupts */
1484  		atmel_uart_writel(port, ATMEL_US_IER,
1485  				  atmel_port->tx_done_mask);
1486  	} else {
1487  		if (atmel_uart_is_half_duplex(port)) {
1488  			/* DMA done, stop TX, start RX for RS485 */
1489  			atmel_start_rx(port);
1490  		}
1491  	}
1492  
1493  	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1494  		uart_write_wakeup(port);
1495  }
1496  
1497  static int atmel_prepare_tx_pdc(struct uart_port *port)
1498  {
1499  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1500  	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1501  	struct circ_buf *xmit = &port->state->xmit;
1502  
1503  	pdc->buf = xmit->buf;
1504  	pdc->dma_addr = dma_map_single(port->dev,
1505  					pdc->buf,
1506  					UART_XMIT_SIZE,
1507  					DMA_TO_DEVICE);
1508  	pdc->dma_size = UART_XMIT_SIZE;
1509  	pdc->ofs = 0;
1510  
1511  	return 0;
1512  }
1513  
1514  static void atmel_rx_from_ring(struct uart_port *port)
1515  {
1516  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1517  	struct circ_buf *ring = &atmel_port->rx_ring;
1518  	unsigned int status;
1519  	u8 flg;
1520  
1521  	while (ring->head != ring->tail) {
1522  		struct atmel_uart_char c;
1523  
1524  		/* Make sure c is loaded after head. */
1525  		smp_rmb();
1526  
1527  		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1528  
1529  		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1530  
1531  		port->icount.rx++;
1532  		status = c.status;
1533  		flg = TTY_NORMAL;
1534  
1535  		/*
1536  		 * note that the error handling code is
1537  		 * out of the main execution path
1538  		 */
1539  		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1540  				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1541  			if (status & ATMEL_US_RXBRK) {
1542  				/* ignore side-effect */
1543  				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1544  
1545  				port->icount.brk++;
1546  				if (uart_handle_break(port))
1547  					continue;
1548  			}
1549  			if (status & ATMEL_US_PARE)
1550  				port->icount.parity++;
1551  			if (status & ATMEL_US_FRAME)
1552  				port->icount.frame++;
1553  			if (status & ATMEL_US_OVRE)
1554  				port->icount.overrun++;
1555  
1556  			status &= port->read_status_mask;
1557  
1558  			if (status & ATMEL_US_RXBRK)
1559  				flg = TTY_BREAK;
1560  			else if (status & ATMEL_US_PARE)
1561  				flg = TTY_PARITY;
1562  			else if (status & ATMEL_US_FRAME)
1563  				flg = TTY_FRAME;
1564  		}
1565  
1566  
1567  		if (uart_handle_sysrq_char(port, c.ch))
1568  			continue;
1569  
1570  		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1571  	}
1572  
1573  	tty_flip_buffer_push(&port->state->port);
1574  }
1575  
1576  static void atmel_release_rx_pdc(struct uart_port *port)
1577  {
1578  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1579  	int i;
1580  
1581  	for (i = 0; i < 2; i++) {
1582  		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1583  
1584  		dma_unmap_single(port->dev,
1585  				 pdc->dma_addr,
1586  				 pdc->dma_size,
1587  				 DMA_FROM_DEVICE);
1588  		kfree(pdc->buf);
1589  	}
1590  }
1591  
1592  static void atmel_rx_from_pdc(struct uart_port *port)
1593  {
1594  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1595  	struct tty_port *tport = &port->state->port;
1596  	struct atmel_dma_buffer *pdc;
1597  	int rx_idx = atmel_port->pdc_rx_idx;
1598  	unsigned int head;
1599  	unsigned int tail;
1600  	unsigned int count;
1601  
1602  	do {
1603  		/* Reset the UART timeout early so that we don't miss one */
1604  		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1605  
1606  		pdc = &atmel_port->pdc_rx[rx_idx];
1607  		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1608  		tail = pdc->ofs;
1609  
1610  		/* If the PDC has switched buffers, RPR won't contain
1611  		 * any address within the current buffer. Since head
1612  		 * is unsigned, we just need a one-way comparison to
1613  		 * find out.
1614  		 *
1615  		 * In this case, we just need to consume the entire
1616  		 * buffer and resubmit it for DMA. This will clear the
1617  		 * ENDRX bit as well, so that we can safely re-enable
1618  		 * all interrupts below.
1619  		 */
1620  		head = min(head, pdc->dma_size);
1621  
1622  		if (likely(head != tail)) {
1623  			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1624  					pdc->dma_size, DMA_FROM_DEVICE);
1625  
1626  			/*
1627  			 * head will only wrap around when we recycle
1628  			 * the DMA buffer, and when that happens, we
1629  			 * explicitly set tail to 0. So head will
1630  			 * always be greater than tail.
1631  			 */
1632  			count = head - tail;
1633  
1634  			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1635  						count);
1636  
1637  			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1638  					pdc->dma_size, DMA_FROM_DEVICE);
1639  
1640  			port->icount.rx += count;
1641  			pdc->ofs = head;
1642  		}
1643  
1644  		/*
1645  		 * If the current buffer is full, we need to check if
1646  		 * the next one contains any additional data.
1647  		 */
1648  		if (head >= pdc->dma_size) {
1649  			pdc->ofs = 0;
1650  			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1651  			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1652  
1653  			rx_idx = !rx_idx;
1654  			atmel_port->pdc_rx_idx = rx_idx;
1655  		}
1656  	} while (head >= pdc->dma_size);
1657  
1658  	tty_flip_buffer_push(tport);
1659  
1660  	atmel_uart_writel(port, ATMEL_US_IER,
1661  			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1662  }
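
/*
 * Double-buffer flow (informative): RPR/RCR describe the buffer currently
 * being filled and RNPR/RNCR the next one.  When a buffer fills up it is
 * requeued as the "next" buffer and pdc_rx_idx flips, so reception continues
 * without gaps while the CPU drains the completed buffer.
 */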
1663  
1664  static int atmel_prepare_rx_pdc(struct uart_port *port)
1665  {
1666  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1667  	int i;
1668  
1669  	for (i = 0; i < 2; i++) {
1670  		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1671  
1672  		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1673  		if (pdc->buf == NULL) {
1674  			if (i != 0) {
1675  				dma_unmap_single(port->dev,
1676  					atmel_port->pdc_rx[0].dma_addr,
1677  					PDC_BUFFER_SIZE,
1678  					DMA_FROM_DEVICE);
1679  				kfree(atmel_port->pdc_rx[0].buf);
1680  			}
1681  			atmel_port->use_pdc_rx = false;
1682  			return -ENOMEM;
1683  		}
1684  		pdc->dma_addr = dma_map_single(port->dev,
1685  						pdc->buf,
1686  						PDC_BUFFER_SIZE,
1687  						DMA_FROM_DEVICE);
1688  		pdc->dma_size = PDC_BUFFER_SIZE;
1689  		pdc->ofs = 0;
1690  	}
1691  
1692  	atmel_port->pdc_rx_idx = 0;
1693  
1694  	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1695  	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1696  
1697  	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1698  			  atmel_port->pdc_rx[1].dma_addr);
1699  	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1700  
1701  	return 0;
1702  }
1703  
1704  /*
1705   * tasklet handling tty stuff outside the interrupt handler.
1706   */
1707  static void atmel_tasklet_rx_func(struct tasklet_struct *t)
1708  {
1709  	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1710  							  tasklet_rx);
1711  	struct uart_port *port = &atmel_port->uart;
1712  
1713  	/* The interrupt handler does not take the lock */
1714  	spin_lock(&port->lock);
1715  	atmel_port->schedule_rx(port);
1716  	spin_unlock(&port->lock);
1717  }
1718  
1719  static void atmel_tasklet_tx_func(struct tasklet_struct *t)
1720  {
1721  	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1722  							  tasklet_tx);
1723  	struct uart_port *port = &atmel_port->uart;
1724  
1725  	/* The interrupt handler does not take the lock */
1726  	spin_lock(&port->lock);
1727  	atmel_port->schedule_tx(port);
1728  	spin_unlock(&port->lock);
1729  }
1730  
1731  static void atmel_init_property(struct atmel_uart_port *atmel_port,
1732  				struct platform_device *pdev)
1733  {
1734  	struct device_node *np = pdev->dev.of_node;
1735  
1736  	/* DMA/PDC usage specification */
1737  	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1738  		if (of_property_read_bool(np, "dmas")) {
1739  			atmel_port->use_dma_rx  = true;
1740  			atmel_port->use_pdc_rx  = false;
1741  		} else {
1742  			atmel_port->use_dma_rx  = false;
1743  			atmel_port->use_pdc_rx  = true;
1744  		}
1745  	} else {
1746  		atmel_port->use_dma_rx  = false;
1747  		atmel_port->use_pdc_rx  = false;
1748  	}
1749  
1750  	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1751  		if (of_property_read_bool(np, "dmas")) {
1752  			atmel_port->use_dma_tx  = true;
1753  			atmel_port->use_pdc_tx  = false;
1754  		} else {
1755  			atmel_port->use_dma_tx  = false;
1756  			atmel_port->use_pdc_tx  = true;
1757  		}
1758  	} else {
1759  		atmel_port->use_dma_tx  = false;
1760  		atmel_port->use_pdc_tx  = false;
1761  	}
1762  }
1763  
1764  static void atmel_set_ops(struct uart_port *port)
1765  {
1766  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1767  
1768  	if (atmel_use_dma_rx(port)) {
1769  		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1770  		atmel_port->schedule_rx = &atmel_rx_from_dma;
1771  		atmel_port->release_rx = &atmel_release_rx_dma;
1772  	} else if (atmel_use_pdc_rx(port)) {
1773  		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1774  		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1775  		atmel_port->release_rx = &atmel_release_rx_pdc;
1776  	} else {
1777  		atmel_port->prepare_rx = NULL;
1778  		atmel_port->schedule_rx = &atmel_rx_from_ring;
1779  		atmel_port->release_rx = NULL;
1780  	}
1781  
1782  	if (atmel_use_dma_tx(port)) {
1783  		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1784  		atmel_port->schedule_tx = &atmel_tx_dma;
1785  		atmel_port->release_tx = &atmel_release_tx_dma;
1786  	} else if (atmel_use_pdc_tx(port)) {
1787  		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1788  		atmel_port->schedule_tx = &atmel_tx_pdc;
1789  		atmel_port->release_tx = &atmel_release_tx_pdc;
1790  	} else {
1791  		atmel_port->prepare_tx = NULL;
1792  		atmel_port->schedule_tx = &atmel_tx_chars;
1793  		atmel_port->release_tx = NULL;
1794  	}
1795  }
1796  
1797  /*
1798   * Get the IP name: usart or uart
1799   */
1800  static void atmel_get_ip_name(struct uart_port *port)
1801  {
1802  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1803  	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1804  	u32 version;
1805  	u32 usart, dbgu_uart, new_uart;
1806  	/* ASCII decoding for the IP name (see the sketch after this function) */
1807  	usart = 0x55534152;	/* USAR(T) */
1808  	dbgu_uart = 0x44424755;	/* DBGU */
1809  	new_uart = 0x55415254;	/* UART */
1810  
1811  	/*
1812  	 * Only USART devices from at91sam9260 SOC implement fractional
1813  	 * baudrate. It is available for all asynchronous modes, with the
1814  	 * following restriction: the sampling clock's duty cycle is not
1815  	 * constant.
1816  	 */
1817  	atmel_port->has_frac_baudrate = false;
1818  	atmel_port->has_hw_timer = false;
1819  	atmel_port->is_usart = false;
1820  
1821  	if (name == new_uart) {
1822  		dev_dbg(port->dev, "Uart with hw timer");
1823  		atmel_port->has_hw_timer = true;
1824  		atmel_port->rtor = ATMEL_UA_RTOR;
1825  	} else if (name == usart) {
1826  		dev_dbg(port->dev, "Usart\n");
1827  		atmel_port->has_frac_baudrate = true;
1828  		atmel_port->has_hw_timer = true;
1829  		atmel_port->is_usart = true;
1830  		atmel_port->rtor = ATMEL_US_RTOR;
1831  		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1832  		switch (version) {
1833  		case 0x814:	/* sama5d2 */
1834  			fallthrough;
1835  		case 0x701:	/* sama5d4 */
1836  			atmel_port->fidi_min = 3;
1837  			atmel_port->fidi_max = 65535;
1838  			break;
1839  		case 0x502:	/* sam9x5, sama5d3 */
1840  			atmel_port->fidi_min = 3;
1841  			atmel_port->fidi_max = 2047;
1842  			break;
1843  		default:
1844  			atmel_port->fidi_min = 1;
1845  			atmel_port->fidi_max = 2047;
1846  		}
1847  	} else if (name == dbgu_uart) {
1848  		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1849  	} else {
1850  		/* fallback for older SoCs: use version field */
1851  		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1852  		switch (version) {
1853  		case 0x302:
1854  		case 0x10213:
1855  		case 0x10302:
1856  			dev_dbg(port->dev, "This version is usart\n");
1857  			atmel_port->has_frac_baudrate = true;
1858  			atmel_port->has_hw_timer = true;
1859  			atmel_port->is_usart = true;
1860  			atmel_port->rtor = ATMEL_US_RTOR;
1861  			break;
1862  		case 0x203:
1863  		case 0x10202:
1864  			dev_dbg(port->dev, "This version is uart\n");
1865  			break;
1866  		default:
1867  			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1868  		}
1869  	}
1870  }
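/*
 * Editor's note: an illustrative, self-contained sketch (not part of this
 * driver) showing how the NAME register constants used above map to ASCII.
 * 0x55534152 is the bytes 'U' 'S' 'A' 'R' packed MSB-first, 0x44424755 is
 * "DBGU" and 0x55415254 is "UART", so comparing the register value against
 * these constants is a four-character compare done as one 32-bit compare.
 * decode_ip_name() is a hypothetical helper.
 */
static void decode_ip_name(unsigned int name, char out[5])
{
	out[0] = (name >> 24) & 0xff;	/* 'U' (0x55) for 0x55534152 */
	out[1] = (name >> 16) & 0xff;	/* 'S' (0x53) */
	out[2] = (name >> 8) & 0xff;	/* 'A' (0x41) */
	out[3] = name & 0xff;		/* 'R' (0x52) */
	out[4] = '\0';
}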
1871  
1872  /*
1873   * Perform initialization and enable port for reception
1874   */
1875  static int atmel_startup(struct uart_port *port)
1876  {
1877  	struct platform_device *pdev = to_platform_device(port->dev);
1878  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1879  	int retval;
1880  
1881  	/*
1882  	 * Ensure that no interrupts are enabled otherwise when
1883  	 * request_irq() is called we could get stuck trying to
1884  	 * handle an unexpected interrupt
1885  	 */
1886  	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1887  	atmel_port->ms_irq_enabled = false;
1888  
1889  	/*
1890  	 * Allocate the IRQ
1891  	 */
1892  	retval = request_irq(port->irq, atmel_interrupt,
1893  			     IRQF_SHARED | IRQF_COND_SUSPEND,
1894  			     dev_name(&pdev->dev), port);
1895  	if (retval) {
1896  		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1897  		return retval;
1898  	}
1899  
1900  	atomic_set(&atmel_port->tasklet_shutdown, 0);
1901  	tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func);
1902  	tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func);
1903  
1904  	/*
1905  	 * Initialize DMA (if necessary)
1906  	 */
1907  	atmel_init_property(atmel_port, pdev);
1908  	atmel_set_ops(port);
1909  
1910  	if (atmel_port->prepare_rx) {
1911  		retval = atmel_port->prepare_rx(port);
1912  		if (retval < 0)
1913  			atmel_set_ops(port);
1914  	}
1915  
1916  	if (atmel_port->prepare_tx) {
1917  		retval = atmel_port->prepare_tx(port);
1918  		if (retval < 0)
1919  			atmel_set_ops(port);
1920  	}
1921  
1922  	/*
1923  	 * Enable FIFO when available
1924  	 */
1925  	if (atmel_port->fifo_size) {
1926  		unsigned int txrdym = ATMEL_US_ONE_DATA;
1927  		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1928  		unsigned int fmr;
1929  
1930  		atmel_uart_writel(port, ATMEL_US_CR,
1931  				  ATMEL_US_FIFOEN |
1932  				  ATMEL_US_RXFCLR |
1933  				  ATMEL_US_TXFLCLR);
1934  
1935  		if (atmel_use_dma_tx(port))
1936  			txrdym = ATMEL_US_FOUR_DATA;
1937  
1938  		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1939  		if (atmel_port->rts_high &&
1940  		    atmel_port->rts_low)
1941  			fmr |=	ATMEL_US_FRTSC |
1942  				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1943  				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1944  
1945  		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1946  	}
1947  
1948  	/* Save current CSR for comparison in atmel_tasklet_func() */
1949  	atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR);
1950  
1951  	/*
1952  	 * Finally, enable the serial port
1953  	 */
1954  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1955  	/* enable xmit & rcvr */
1956  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1957  	atmel_port->tx_stopped = false;
1958  
1959  	timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1960  
1961  	if (atmel_use_pdc_rx(port)) {
1962  		/* set UART timeout */
1963  		if (!atmel_port->has_hw_timer) {
1964  			mod_timer(&atmel_port->uart_timer,
1965  					jiffies + uart_poll_timeout(port));
1966  		/* set USART timeout */
1967  		} else {
1968  			atmel_uart_writel(port, atmel_port->rtor,
1969  					  PDC_RX_TIMEOUT);
1970  			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1971  
1972  			atmel_uart_writel(port, ATMEL_US_IER,
1973  					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1974  		}
1975  		/* enable PDC controller */
1976  		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1977  	} else if (atmel_use_dma_rx(port)) {
1978  		/* set UART timeout */
1979  		if (!atmel_port->has_hw_timer) {
1980  			mod_timer(&atmel_port->uart_timer,
1981  					jiffies + uart_poll_timeout(port));
1982  		/* set USART timeout */
1983  		} else {
1984  			atmel_uart_writel(port, atmel_port->rtor,
1985  					  PDC_RX_TIMEOUT);
1986  			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1987  
1988  			atmel_uart_writel(port, ATMEL_US_IER,
1989  					  ATMEL_US_TIMEOUT);
1990  		}
1991  	} else {
1992  		/* enable receive only */
1993  		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1994  	}
1995  
1996  	return 0;
1997  }
1998  
1999  /*
2000   * Flush any TX data submitted for DMA. Called when the TX circular
2001   * buffer is reset.
2002   */
2003  static void atmel_flush_buffer(struct uart_port *port)
2004  {
2005  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2006  
2007  	if (atmel_use_pdc_tx(port)) {
2008  		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
2009  		atmel_port->pdc_tx.ofs = 0;
2010  	}
2011  	/*
2012  	 * in uart_flush_buffer(), the xmit circular buffer has just
2013  	 * been cleared, so we have to reset tx_len accordingly.
2014  	 */
2015  	atmel_port->tx_len = 0;
2016  }
2017  
2018  /*
2019   * Disable the port
2020   */
2021  static void atmel_shutdown(struct uart_port *port)
2022  {
2023  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2024  
2025  	/* Disable modem control lines interrupts */
2026  	atmel_disable_ms(port);
2027  
2028  	/* Disable interrupts at device level */
2029  	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2030  
2031  	/* Prevent spurious interrupts from scheduling the tasklet */
2032  	atomic_inc(&atmel_port->tasklet_shutdown);
2033  
2034  	/*
2035  	 * Prevent any tasklets being scheduled during
2036  	 * cleanup
2037  	 */
2038  	del_timer_sync(&atmel_port->uart_timer);
2039  
2040  	/* Make sure that no interrupt is on the fly */
2041  	synchronize_irq(port->irq);
2042  
2043  	/*
2044  	 * Clear out any scheduled tasklets before
2045  	 * we destroy the buffers
2046  	 */
2047  	tasklet_kill(&atmel_port->tasklet_rx);
2048  	tasklet_kill(&atmel_port->tasklet_tx);
2049  
2050  	/*
2051  	 * Ensure everything is stopped and
2052  	 * disable port and break condition.
2053  	 */
2054  	atmel_stop_rx(port);
2055  	atmel_stop_tx(port);
2056  
2057  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2058  
2059  	/*
2060  	 * Shut-down the DMA.
2061  	 */
2062  	if (atmel_port->release_rx)
2063  		atmel_port->release_rx(port);
2064  	if (atmel_port->release_tx)
2065  		atmel_port->release_tx(port);
2066  
2067  	/*
2068  	 * Reset ring buffer pointers
2069  	 */
2070  	atmel_port->rx_ring.head = 0;
2071  	atmel_port->rx_ring.tail = 0;
2072  
2073  	/*
2074  	 * Free the interrupts
2075  	 */
2076  	free_irq(port->irq, port);
2077  
2078  	atmel_flush_buffer(port);
2079  }
2080  
2081  /*
2082   * Power / Clock management.
2083   */
2084  static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2085  			    unsigned int oldstate)
2086  {
2087  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2088  
2089  	switch (state) {
2090  	case UART_PM_STATE_ON:
2091  		/*
2092  		 * Enable the peripheral clock for this serial port.
2093  		 * This is called on uart_open() or a resume event.
2094  		 */
2095  		clk_prepare_enable(atmel_port->clk);
2096  
2097  		/* re-enable interrupts if we disabled some on suspend */
2098  		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2099  		break;
2100  	case UART_PM_STATE_OFF:
2101  		/* Back up the interrupt mask and disable all interrupts */
2102  		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2103  		atmel_uart_writel(port, ATMEL_US_IDR, -1);
2104  
2105  		/*
2106  		 * Disable the peripheral clock for this serial port.
2107  		 * This is called on uart_close() or a suspend event.
2108  		 */
2109  		clk_disable_unprepare(atmel_port->clk);
2110  		if (__clk_is_enabled(atmel_port->gclk))
2111  			clk_disable_unprepare(atmel_port->gclk);
2112  		break;
2113  	default:
2114  		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2115  	}
2116  }
2117  
2118  /*
2119   * Change the port parameters
2120   */
2121  static void atmel_set_termios(struct uart_port *port,
2122  			      struct ktermios *termios,
2123  			      const struct ktermios *old)
2124  {
2125  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2126  	unsigned long flags;
2127  	unsigned int old_mode, mode, imr, quot, div, cd, fp = 0;
2128  	unsigned int baud, actual_baud, gclk_rate;
2129  	int ret;
2130  
2131  	/* save the current mode register */
2132  	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2133  
2134  	/* reset the mode, clock divisor, parity, stop bits and data size */
2135  	if (atmel_port->is_usart)
2136  		mode &= ~(ATMEL_US_NBSTOP | ATMEL_US_PAR | ATMEL_US_CHRL |
2137  			  ATMEL_US_USCLKS | ATMEL_US_USMODE);
2138  	else
2139  		mode &= ~(ATMEL_UA_BRSRCCK | ATMEL_US_PAR | ATMEL_UA_FILTER);
2140  
2141  	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2142  
2143  	/* byte size */
2144  	switch (termios->c_cflag & CSIZE) {
2145  	case CS5:
2146  		mode |= ATMEL_US_CHRL_5;
2147  		break;
2148  	case CS6:
2149  		mode |= ATMEL_US_CHRL_6;
2150  		break;
2151  	case CS7:
2152  		mode |= ATMEL_US_CHRL_7;
2153  		break;
2154  	default:
2155  		mode |= ATMEL_US_CHRL_8;
2156  		break;
2157  	}
2158  
2159  	/* stop bits */
2160  	if (termios->c_cflag & CSTOPB)
2161  		mode |= ATMEL_US_NBSTOP_2;
2162  
2163  	/* parity */
2164  	if (termios->c_cflag & PARENB) {
2165  		/* Mark or Space parity */
2166  		if (termios->c_cflag & CMSPAR) {
2167  			if (termios->c_cflag & PARODD)
2168  				mode |= ATMEL_US_PAR_MARK;
2169  			else
2170  				mode |= ATMEL_US_PAR_SPACE;
2171  		} else if (termios->c_cflag & PARODD)
2172  			mode |= ATMEL_US_PAR_ODD;
2173  		else
2174  			mode |= ATMEL_US_PAR_EVEN;
2175  	} else
2176  		mode |= ATMEL_US_PAR_NONE;
2177  
2178  	spin_lock_irqsave(&port->lock, flags);
2179  
2180  	port->read_status_mask = ATMEL_US_OVRE;
2181  	if (termios->c_iflag & INPCK)
2182  		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2183  	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2184  		port->read_status_mask |= ATMEL_US_RXBRK;
2185  
2186  	if (atmel_use_pdc_rx(port))
2187  		/* need to enable error interrupts */
2188  		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2189  
2190  	/*
2191  	 * Characters to ignore
2192  	 */
2193  	port->ignore_status_mask = 0;
2194  	if (termios->c_iflag & IGNPAR)
2195  		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2196  	if (termios->c_iflag & IGNBRK) {
2197  		port->ignore_status_mask |= ATMEL_US_RXBRK;
2198  		/*
2199  		 * If we're ignoring parity and break indicators,
2200  		 * ignore overruns too (for real raw support).
2201  		 */
2202  		if (termios->c_iflag & IGNPAR)
2203  			port->ignore_status_mask |= ATMEL_US_OVRE;
2204  	}
2205  	/* TODO: Ignore all characters if CREAD is set.*/
2206  
2207  	/* update the per-port timeout */
2208  	uart_update_timeout(port, termios->c_cflag, baud);
2209  
2210  	/*
2211  	 * save/disable interrupts. The tty layer will ensure that the
2212  	 * transmitter is empty if requested by the caller, so there's
2213  	 * no need to wait for it here.
2214  	 */
2215  	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2216  	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2217  
2218  	/* disable receiver and transmitter */
2219  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2220  	atmel_port->tx_stopped = true;
2221  
2222  	/* mode */
2223  	if (port->rs485.flags & SER_RS485_ENABLED) {
2224  		atmel_uart_writel(port, ATMEL_US_TTGR,
2225  				  port->rs485.delay_rts_after_send);
2226  		mode |= ATMEL_US_USMODE_RS485;
2227  	} else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
2228  		atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
2229  		/* select mck clock, and output  */
2230  		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
2231  		/* set max iterations */
2232  		mode |= ATMEL_US_MAX_ITER(3);
2233  		if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
2234  				== SER_ISO7816_T(0))
2235  			mode |= ATMEL_US_USMODE_ISO7816_T0;
2236  		else
2237  			mode |= ATMEL_US_USMODE_ISO7816_T1;
2238  	} else if (termios->c_cflag & CRTSCTS) {
2239  		/* RS232 with hardware handshake (RTS/CTS) */
2240  		if (atmel_use_fifo(port) &&
2241  		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2242  			/*
2243  			 * with ATMEL_US_USMODE_HWHS set, the controller will
2244  			 * be able to drive the RTS pin high/low when the RX
2245  			 * FIFO is above RXFTHRES/below RXFTHRES2.
2246  			 * It will also disable the transmitter when the CTS
2247  			 * pin is high.
2248  			 * This mode is not activated if CTS pin is a GPIO
2249  			 * because in this case, the transmitter is always
2250  			 * disabled (there must be an internal pull-up
2251  			 * responsible for this behaviour).
2252  			 * If the RTS pin is a GPIO, the controller won't be
2253  			 * able to drive it according to the FIFO thresholds,
2254  			 * but it will be handled by the driver.
2255  			 */
2256  			mode |= ATMEL_US_USMODE_HWHS;
2257  		} else {
2258  			/*
2259  			 * For platforms without FIFO, the flow control is
2260  			 * handled by the driver.
2261  			 */
2262  			mode |= ATMEL_US_USMODE_NORMAL;
2263  		}
2264  	} else {
2265  		/* RS232 without hardware handshake */
2266  		mode |= ATMEL_US_USMODE_NORMAL;
2267  	}
2268  
2269  	/*
2270  	 * Set the baud rate:
2271  	 * Fractional baud rate allows the output frequency to be set more
2272  	 * accurately. It is enabled only in normal mode (a worked sketch of
2273  	 * this computation follows this function).
2274  	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2275  	 * Currently, OVER is always set to 0, so we get
2276  	 * baudrate = selected clock / (16 * (CD + FP / 8)), and therefore
2277  	 * 8 * CD + FP = selected clock / (2 * baudrate)
2278  	 */
2279  	if (atmel_port->has_frac_baudrate) {
2280  		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2281  		cd = div >> 3;
2282  		fp = div & ATMEL_US_FP_MASK;
2283  	} else {
2284  		cd = uart_get_divisor(port, baud);
2285  	}
2286  
2287  	/*
2288  	 * If the current value of the Clock Divisor surpasses the 16 bit
2289  	 * ATMEL_US_CD mask and the IP is USART, switch to the Peripheral
2290  	 * Clock implicitly divided by 8.
2291  	 * If the IP is UART however, keep the highest possible value for
2292  	 * the CD and avoid needless division of CD, since UART IP's do not
2293  	 * support implicit division of the Peripheral Clock.
2294  	 */
2295  	if (atmel_port->is_usart && cd > ATMEL_US_CD) {
2296  		cd /= 8;
2297  		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2298  	} else {
2299  		cd = min_t(unsigned int, cd, ATMEL_US_CD);
2300  	}
2301  
2302  	/*
2303  	 * If there is no Fractional Part, there is a high chance that
2304  	 * we may be able to generate a baudrate closer to the desired one
2305  	 * if we use the GCLK as the clock source driving the baudrate
2306  	 * generator.
2307  	 */
2308  	if (!atmel_port->has_frac_baudrate) {
2309  		if (__clk_is_enabled(atmel_port->gclk))
2310  			clk_disable_unprepare(atmel_port->gclk);
2311  		gclk_rate = clk_round_rate(atmel_port->gclk, 16 * baud);
2312  		actual_baud = clk_get_rate(atmel_port->clk) / (16 * cd);
2313  		if (gclk_rate && abs(atmel_error_rate(baud, actual_baud)) >
2314  		    abs(atmel_error_rate(baud, gclk_rate / 16))) {
2315  			clk_set_rate(atmel_port->gclk, 16 * baud);
2316  			ret = clk_prepare_enable(atmel_port->gclk);
2317  			if (ret)
2318  				goto gclk_fail;
2319  
2320  			if (atmel_port->is_usart) {
2321  				mode &= ~ATMEL_US_USCLKS;
2322  				mode |= ATMEL_US_USCLKS_GCLK;
2323  			} else {
2324  				mode |= ATMEL_UA_BRSRCCK;
2325  			}
2326  
2327  			/*
2328  			 * Set the Clock Divisor for GCLK to 1.
2329  			 * Since we were able to generate the smallest
2330  			 * multiple of the desired baudrate times 16,
2331  			 * then we surely can generate a bigger multiple
2332  			 * with the exact error rate for an equally increased
2333  			 * CD. Thus no need to take into account
2334  			 * a higher value for CD.
2335  			 */
2336  			cd = 1;
2337  		}
2338  	}
2339  
2340  gclk_fail:
2341  	quot = cd | fp << ATMEL_US_FP_OFFSET;
2342  
2343  	if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
2344  		atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2345  
2346  	/* set the mode, clock divisor, parity, stop bits and data size */
2347  	atmel_uart_writel(port, ATMEL_US_MR, mode);
2348  
2349  	/*
2350  	 * when switching the mode, set the RTS line state according to the
2351  	 * new mode, otherwise keep the former state
2352  	 */
2353  	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2354  		unsigned int rts_state;
2355  
2356  		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2357  			/* let the hardware control the RTS line */
2358  			rts_state = ATMEL_US_RTSDIS;
2359  		} else {
2360  			/* force RTS line to low level */
2361  			rts_state = ATMEL_US_RTSEN;
2362  		}
2363  
2364  		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2365  	}
2366  
2367  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2368  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2369  	atmel_port->tx_stopped = false;
2370  
2371  	/* restore interrupts */
2372  	atmel_uart_writel(port, ATMEL_US_IER, imr);
2373  
2374  	/* CTS flow-control and modem-status interrupts */
2375  	if (UART_ENABLE_MS(port, termios->c_cflag))
2376  		atmel_enable_ms(port);
2377  	else
2378  		atmel_disable_ms(port);
2379  
2380  	spin_unlock_irqrestore(&port->lock, flags);
2381  }
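/*
 * Editor's note: a self-contained sketch (not part of this driver) of the
 * fractional baud rate computation described in the comment inside
 * atmel_set_termios() above. frac_divisor() is a hypothetical helper and the
 * 66 MHz clock / 115200 baud figures below are example inputs only.
 */
static void frac_divisor(unsigned int uartclk, unsigned int baud,
			 unsigned int *cd, unsigned int *fp)
{
	/* div = round(uartclk / (2 * baud)), i.e. 8 * CD + FP */
	unsigned int div = (uartclk + baud) / (2 * baud);

	*cd = div >> 3;		/* integer part of the divisor */
	*fp = div & 0x7;	/* fractional part, in eighths */
}

/*
 * Worked example: uartclk = 66000000, baud = 115200
 *   div = round(66000000 / 230400) = 286  ->  cd = 35, fp = 6
 *   actual rate = 66000000 / (16 * (35 + 6/8)) ~= 115385, about 0.16% error
 */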
2382  
2383  static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2384  {
2385  	if (termios->c_line == N_PPS) {
2386  		port->flags |= UPF_HARDPPS_CD;
2387  		spin_lock_irq(&port->lock);
2388  		atmel_enable_ms(port);
2389  		spin_unlock_irq(&port->lock);
2390  	} else {
2391  		port->flags &= ~UPF_HARDPPS_CD;
2392  		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2393  			spin_lock_irq(&port->lock);
2394  			atmel_disable_ms(port);
2395  			spin_unlock_irq(&port->lock);
2396  		}
2397  	}
2398  }
2399  
2400  /*
2401   * Return string describing the specified port
2402   */
2403  static const char *atmel_type(struct uart_port *port)
2404  {
2405  	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2406  }
2407  
2408  /*
2409   * Release the memory region(s) being used by 'port'.
2410   */
2411  static void atmel_release_port(struct uart_port *port)
2412  {
2413  	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2414  	int size = resource_size(mpdev->resource);
2415  
2416  	release_mem_region(port->mapbase, size);
2417  
2418  	if (port->flags & UPF_IOREMAP) {
2419  		iounmap(port->membase);
2420  		port->membase = NULL;
2421  	}
2422  }
2423  
2424  /*
2425   * Request the memory region(s) being used by 'port'.
2426   */
2427  static int atmel_request_port(struct uart_port *port)
2428  {
2429  	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2430  	int size = resource_size(mpdev->resource);
2431  
2432  	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2433  		return -EBUSY;
2434  
2435  	if (port->flags & UPF_IOREMAP) {
2436  		port->membase = ioremap(port->mapbase, size);
2437  		if (port->membase == NULL) {
2438  			release_mem_region(port->mapbase, size);
2439  			return -ENOMEM;
2440  		}
2441  	}
2442  
2443  	return 0;
2444  }
2445  
2446  /*
2447   * Configure/autoconfigure the port.
2448   */
2449  static void atmel_config_port(struct uart_port *port, int flags)
2450  {
2451  	if (flags & UART_CONFIG_TYPE) {
2452  		port->type = PORT_ATMEL;
2453  		atmel_request_port(port);
2454  	}
2455  }
2456  
2457  /*
2458   * Verify the new serial_struct (for TIOCSSERIAL).
2459   */
2460  static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2461  {
2462  	int ret = 0;
2463  	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2464  		ret = -EINVAL;
2465  	if (port->irq != ser->irq)
2466  		ret = -EINVAL;
2467  	if (ser->io_type != SERIAL_IO_MEM)
2468  		ret = -EINVAL;
2469  	if (port->uartclk / 16 != ser->baud_base)
2470  		ret = -EINVAL;
2471  	if (port->mapbase != (unsigned long)ser->iomem_base)
2472  		ret = -EINVAL;
2473  	if (port->iobase != ser->port)
2474  		ret = -EINVAL;
2475  	if (ser->hub6 != 0)
2476  		ret = -EINVAL;
2477  	return ret;
2478  }
2479  
2480  #ifdef CONFIG_CONSOLE_POLL
2481  static int atmel_poll_get_char(struct uart_port *port)
2482  {
2483  	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2484  		cpu_relax();
2485  
2486  	return atmel_uart_read_char(port);
2487  }
2488  
2489  static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2490  {
2491  	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2492  		cpu_relax();
2493  
2494  	atmel_uart_write_char(port, ch);
2495  }
2496  #endif
2497  
2498  static const struct uart_ops atmel_pops = {
2499  	.tx_empty	= atmel_tx_empty,
2500  	.set_mctrl	= atmel_set_mctrl,
2501  	.get_mctrl	= atmel_get_mctrl,
2502  	.stop_tx	= atmel_stop_tx,
2503  	.start_tx	= atmel_start_tx,
2504  	.stop_rx	= atmel_stop_rx,
2505  	.enable_ms	= atmel_enable_ms,
2506  	.break_ctl	= atmel_break_ctl,
2507  	.startup	= atmel_startup,
2508  	.shutdown	= atmel_shutdown,
2509  	.flush_buffer	= atmel_flush_buffer,
2510  	.set_termios	= atmel_set_termios,
2511  	.set_ldisc	= atmel_set_ldisc,
2512  	.type		= atmel_type,
2513  	.release_port	= atmel_release_port,
2514  	.request_port	= atmel_request_port,
2515  	.config_port	= atmel_config_port,
2516  	.verify_port	= atmel_verify_port,
2517  	.pm		= atmel_serial_pm,
2518  #ifdef CONFIG_CONSOLE_POLL
2519  	.poll_get_char	= atmel_poll_get_char,
2520  	.poll_put_char	= atmel_poll_put_char,
2521  #endif
2522  };
2523  
2524  static const struct serial_rs485 atmel_rs485_supported = {
2525  	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
2526  	.delay_rts_before_send = 1,
2527  	.delay_rts_after_send = 1,
2528  };
2529  
2530  /*
2531   * Configure the port from the platform device resource info.
2532   */
2533  static int atmel_init_port(struct atmel_uart_port *atmel_port,
2534  				      struct platform_device *pdev)
2535  {
2536  	int ret;
2537  	struct uart_port *port = &atmel_port->uart;
2538  	struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
2539  
2540  	atmel_init_property(atmel_port, pdev);
2541  	atmel_set_ops(port);
2542  
2543  	port->iotype		= UPIO_MEM;
2544  	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2545  	port->ops		= &atmel_pops;
2546  	port->fifosize		= 1;
2547  	port->dev		= &pdev->dev;
2548  	port->mapbase		= mpdev->resource[0].start;
2549  	port->irq		= platform_get_irq(mpdev, 0);
2550  	port->rs485_config	= atmel_config_rs485;
2551  	port->rs485_supported	= atmel_rs485_supported;
2552  	port->iso7816_config	= atmel_config_iso7816;
2553  	port->membase		= NULL;
2554  
2555  	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2556  
2557  	ret = uart_get_rs485_mode(port);
2558  	if (ret)
2559  		return ret;
2560  
2561  	port->uartclk = clk_get_rate(atmel_port->clk);
2562  
2563  	/*
2564  	 * Use TXEMPTY as the TX-done interrupt for RS485 or ISO7816
2565  	 * (half-duplex); otherwise ENDTX|TXBUFE with PDC, or TXRDY.
2566  	 */
2567  	if (atmel_uart_is_half_duplex(port))
2568  		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2569  	else if (atmel_use_pdc_tx(port)) {
2570  		port->fifosize = PDC_BUFFER_SIZE;
2571  		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2572  	} else {
2573  		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2574  	}
2575  
2576  	return 0;
2577  }
2578  
2579  #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2580  static void atmel_console_putchar(struct uart_port *port, unsigned char ch)
2581  {
2582  	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2583  		cpu_relax();
2584  	atmel_uart_write_char(port, ch);
2585  }
2586  
2587  /*
2588   * Interrupts are disabled on entering
2589   */
2590  static void atmel_console_write(struct console *co, const char *s, u_int count)
2591  {
2592  	struct uart_port *port = &atmel_ports[co->index].uart;
2593  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2594  	unsigned int status, imr;
2595  	unsigned int pdc_tx;
2596  
2597  	/*
2598  	 * First, save IMR and then disable interrupts
2599  	 */
2600  	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2601  	atmel_uart_writel(port, ATMEL_US_IDR,
2602  			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2603  
2604  	/* Store PDC transmit status and disable it */
2605  	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2606  	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2607  
2608  	/* Make sure that tx path is actually able to send characters */
2609  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2610  	atmel_port->tx_stopped = false;
2611  
2612  	uart_console_write(port, s, count, atmel_console_putchar);
2613  
2614  	/*
2615  	 * Finally, wait for transmitter to become empty
2616  	 * and restore IMR
2617  	 */
2618  	do {
2619  		status = atmel_uart_readl(port, ATMEL_US_CSR);
2620  	} while (!(status & ATMEL_US_TXRDY));
2621  
2622  	/* Restore PDC transmit status */
2623  	if (pdc_tx)
2624  		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2625  
2626  	/* set interrupts back the way they were */
2627  	atmel_uart_writel(port, ATMEL_US_IER, imr);
2628  }
2629  
2630  /*
2631   * If the port was already initialised (eg, by a boot loader),
2632   * try to determine the current setup.
2633   */
2634  static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2635  					     int *parity, int *bits)
2636  {
2637  	unsigned int mr, quot;
2638  
2639  	/*
2640  	 * If the baud rate generator isn't running, the port wasn't
2641  	 * initialized by the boot loader.
2642  	 */
2643  	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2644  	if (!quot)
2645  		return;
2646  
2647  	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2648  	if (mr == ATMEL_US_CHRL_8)
2649  		*bits = 8;
2650  	else
2651  		*bits = 7;
2652  
2653  	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2654  	if (mr == ATMEL_US_PAR_EVEN)
2655  		*parity = 'e';
2656  	else if (mr == ATMEL_US_PAR_ODD)
2657  		*parity = 'o';
2658  
2659  	*baud = port->uartclk / (16 * quot);
2660  }
2661  
2662  static int __init atmel_console_setup(struct console *co, char *options)
2663  {
2664  	struct uart_port *port = &atmel_ports[co->index].uart;
2665  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2666  	int baud = 115200;
2667  	int bits = 8;
2668  	int parity = 'n';
2669  	int flow = 'n';
2670  
2671  	if (port->membase == NULL) {
2672  		/* Port not initialized yet - delay setup */
2673  		return -ENODEV;
2674  	}
2675  
2676  	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2677  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2678  	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2679  	atmel_port->tx_stopped = false;
2680  
2681  	if (options)
2682  		uart_parse_options(options, &baud, &parity, &bits, &flow);
2683  	else
2684  		atmel_console_get_options(port, &baud, &parity, &bits);
2685  
2686  	return uart_set_options(port, co, baud, parity, bits, flow);
2687  }
2688  
2689  static struct uart_driver atmel_uart;
2690  
2691  static struct console atmel_console = {
2692  	.name		= ATMEL_DEVICENAME,
2693  	.write		= atmel_console_write,
2694  	.device		= uart_console_device,
2695  	.setup		= atmel_console_setup,
2696  	.flags		= CON_PRINTBUFFER,
2697  	.index		= -1,
2698  	.data		= &atmel_uart,
2699  };
2700  
2701  static void atmel_serial_early_write(struct console *con, const char *s,
2702  				     unsigned int n)
2703  {
2704  	struct earlycon_device *dev = con->data;
2705  
2706  	uart_console_write(&dev->port, s, n, atmel_console_putchar);
2707  }
2708  
2709  static int __init atmel_early_console_setup(struct earlycon_device *device,
2710  					    const char *options)
2711  {
2712  	if (!device->port.membase)
2713  		return -ENODEV;
2714  
2715  	device->con->write = atmel_serial_early_write;
2716  
2717  	return 0;
2718  }
2719  
2720  OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91rm9200-usart",
2721  		    atmel_early_console_setup);
2722  OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91sam9260-usart",
2723  		    atmel_early_console_setup);
2724  
2725  #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2726  
2727  #else
2728  #define ATMEL_CONSOLE_DEVICE	NULL
2729  #endif
2730  
2731  static struct uart_driver atmel_uart = {
2732  	.owner		= THIS_MODULE,
2733  	.driver_name	= "atmel_serial",
2734  	.dev_name	= ATMEL_DEVICENAME,
2735  	.major		= SERIAL_ATMEL_MAJOR,
2736  	.minor		= MINOR_START,
2737  	.nr		= ATMEL_MAX_UART,
2738  	.cons		= ATMEL_CONSOLE_DEVICE,
2739  };
2740  
2741  static bool atmel_serial_clk_will_stop(void)
2742  {
2743  #ifdef CONFIG_ARCH_AT91
2744  	return at91_suspend_entering_slow_clock();
2745  #else
2746  	return false;
2747  #endif
2748  }
2749  
2750  static int __maybe_unused atmel_serial_suspend(struct device *dev)
2751  {
2752  	struct uart_port *port = dev_get_drvdata(dev);
2753  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2754  
2755  	if (uart_console(port) && console_suspend_enabled) {
2756  		/* Drain the TX shifter */
2757  		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2758  			 ATMEL_US_TXEMPTY))
2759  			cpu_relax();
2760  	}
2761  
2762  	if (uart_console(port) && !console_suspend_enabled) {
2763  		/* Cache register values as we won't get a full shutdown/startup
2764  		 * cycle
2765  		 */
2766  		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2767  		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2768  		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2769  		atmel_port->cache.rtor = atmel_uart_readl(port,
2770  							  atmel_port->rtor);
2771  		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2772  		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2773  		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2774  	}
2775  
2776  	/* we can not wake up if we're running on slow clock */
2777  	atmel_port->may_wakeup = device_may_wakeup(dev);
2778  	if (atmel_serial_clk_will_stop()) {
2779  		unsigned long flags;
2780  
2781  		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2782  		atmel_port->suspended = true;
2783  		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2784  		device_set_wakeup_enable(dev, 0);
2785  	}
2786  
2787  	uart_suspend_port(&atmel_uart, port);
2788  
2789  	return 0;
2790  }
2791  
2792  static int __maybe_unused atmel_serial_resume(struct device *dev)
2793  {
2794  	struct uart_port *port = dev_get_drvdata(dev);
2795  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2796  	unsigned long flags;
2797  
2798  	if (uart_console(port) && !console_suspend_enabled) {
2799  		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2800  		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2801  		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2802  		atmel_uart_writel(port, atmel_port->rtor,
2803  				  atmel_port->cache.rtor);
2804  		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2805  
2806  		if (atmel_port->fifo_size) {
2807  			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2808  					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2809  			atmel_uart_writel(port, ATMEL_US_FMR,
2810  					  atmel_port->cache.fmr);
2811  			atmel_uart_writel(port, ATMEL_US_FIER,
2812  					  atmel_port->cache.fimr);
2813  		}
2814  		atmel_start_rx(port);
2815  	}
2816  
2817  	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2818  	if (atmel_port->pending) {
2819  		atmel_handle_receive(port, atmel_port->pending);
2820  		atmel_handle_status(port, atmel_port->pending,
2821  				    atmel_port->pending_status);
2822  		atmel_handle_transmit(port, atmel_port->pending);
2823  		atmel_port->pending = 0;
2824  	}
2825  	atmel_port->suspended = false;
2826  	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2827  
2828  	uart_resume_port(&atmel_uart, port);
2829  	device_set_wakeup_enable(dev, atmel_port->may_wakeup);
2830  
2831  	return 0;
2832  }
2833  
2834  static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2835  				     struct platform_device *pdev)
2836  {
2837  	atmel_port->fifo_size = 0;
2838  	atmel_port->rts_low = 0;
2839  	atmel_port->rts_high = 0;
2840  
2841  	if (of_property_read_u32(pdev->dev.of_node,
2842  				 "atmel,fifo-size",
2843  				 &atmel_port->fifo_size))
2844  		return;
2845  
2846  	if (!atmel_port->fifo_size)
2847  		return;
2848  
2849  	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2850  		atmel_port->fifo_size = 0;
2851  		dev_err(&pdev->dev, "Invalid FIFO size\n");
2852  		return;
2853  	}
2854  
2855  	/*
2856  	 * 0 <= rts_low <= rts_high <= fifo_size
2857  	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
2858  	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
2859  	 * actually stopping to send new data. So we try to set the RTS High
2860  	 * Threshold to a reasonably high value respecting this empirical
2861  	 * 16-data rule when possible (a worked sketch follows this function).
2862  	 */
2863  	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2864  			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2865  	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2866  			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
2867  
2868  	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2869  		 atmel_port->fifo_size);
2870  	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2871  		atmel_port->rts_high);
2872  	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2873  		atmel_port->rts_low);
2874  }
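/*
 * Editor's note: a self-contained sketch (not part of this driver) showing
 * how the RTS thresholds computed above work out for a hypothetical 32-data
 * RX FIFO, using the ATMEL_RTS_HIGH_OFFSET (16) and ATMEL_RTS_LOW_OFFSET (20)
 * values defined at the top of this file. max_int() and
 * example_rts_thresholds() are hypothetical helpers.
 */
static int max_int(int a, int b)
{
	return a > b ? a : b;
}

static void example_rts_thresholds(void)
{
	int fifo_size = 32;	/* example RX FIFO depth */
	int rts_high, rts_low;

	/* max(32 >> 1, 32 - 16) = 16: RTS is driven high (pausing the sender)
	 * once more than 16 data are queued in the RX FIFO */
	rts_high = max_int(fifo_size >> 1, fifo_size - 16);

	/* max(32 >> 2, 32 - 20) = 12: RTS is driven low again (resuming the
	 * sender) once the RX FIFO level drops below 12 data */
	rts_low = max_int(fifo_size >> 2, fifo_size - 20);

	(void)rts_high;
	(void)rts_low;
}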
2875  
2876  static int atmel_serial_probe(struct platform_device *pdev)
2877  {
2878  	struct atmel_uart_port *atmel_port;
2879  	struct device_node *np = pdev->dev.parent->of_node;
2880  	void *data;
2881  	int ret;
2882  	bool rs485_enabled;
2883  
2884  	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2885  
2886  	/*
2887  	 * In device tree there is no node with "atmel,at91rm9200-usart-serial"
2888  	 * as compatible string. This driver is probed by at91-usart mfd driver
2889  	 * which is just a wrapper over the atmel_serial driver and
2890  	 * spi-at91-usart driver. All attributes needed by this driver are
2891  	 * found in of_node of parent.
2892  	 */
2893  	pdev->dev.of_node = np;
2894  
2895  	ret = of_alias_get_id(np, "serial");
2896  	if (ret < 0)
2897  		/* port id not found in platform data nor device-tree aliases:
2898  		 * auto-enumerate it */
2899  		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2900  
2901  	if (ret >= ATMEL_MAX_UART) {
2902  		ret = -ENODEV;
2903  		goto err;
2904  	}
2905  
2906  	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2907  		/* port already in use */
2908  		ret = -EBUSY;
2909  		goto err;
2910  	}
2911  
2912  	atmel_port = &atmel_ports[ret];
2913  	atmel_port->backup_imr = 0;
2914  	atmel_port->uart.line = ret;
2915  	atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
2916  	atmel_serial_probe_fifos(atmel_port, pdev);
2917  
2918  	atomic_set(&atmel_port->tasklet_shutdown, 0);
2919  	spin_lock_init(&atmel_port->lock_suspended);
2920  
2921  	atmel_port->clk = devm_clk_get(&pdev->dev, "usart");
2922  	if (IS_ERR(atmel_port->clk)) {
2923  		ret = PTR_ERR(atmel_port->clk);
2924  		goto err;
2925  	}
2926  	ret = clk_prepare_enable(atmel_port->clk);
2927  	if (ret)
2928  		goto err;
2929  
2930  	atmel_port->gclk = devm_clk_get_optional(&pdev->dev, "gclk");
2931  	if (IS_ERR(atmel_port->gclk)) {
2932  		ret = PTR_ERR(atmel_port->gclk);
2933  		goto err_clk_disable_unprepare;
2934  	}
2935  
2936  	ret = atmel_init_port(atmel_port, pdev);
2937  	if (ret)
2938  		goto err_clk_disable_unprepare;
2939  
2940  	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2941  	if (IS_ERR(atmel_port->gpios)) {
2942  		ret = PTR_ERR(atmel_port->gpios);
2943  		goto err_clk_disable_unprepare;
2944  	}
2945  
2946  	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2947  		ret = -ENOMEM;
2948  		data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
2949  				     sizeof(struct atmel_uart_char),
2950  				     GFP_KERNEL);
2951  		if (!data)
2952  			goto err_clk_disable_unprepare;
2953  		atmel_port->rx_ring.buf = data;
2954  	}
2955  
2956  	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2957  
2958  	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2959  	if (ret)
2960  		goto err_add_port;
2961  
2962  	device_init_wakeup(&pdev->dev, 1);
2963  	platform_set_drvdata(pdev, atmel_port);
2964  
2965  	if (rs485_enabled) {
2966  		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2967  				  ATMEL_US_USMODE_NORMAL);
2968  		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2969  				  ATMEL_US_RTSEN);
2970  	}
2971  
2972  	/*
2973  	 * Get port name of usart or uart
2974  	 */
2975  	atmel_get_ip_name(&atmel_port->uart);
2976  
2977  	/*
2978  	 * The peripheral clock can now safely be disabled till the port
2979  	 * is used
2980  	 */
2981  	clk_disable_unprepare(atmel_port->clk);
2982  
2983  	return 0;
2984  
2985  err_add_port:
2986  	kfree(atmel_port->rx_ring.buf);
2987  	atmel_port->rx_ring.buf = NULL;
2988  err_clk_disable_unprepare:
2989  	clk_disable_unprepare(atmel_port->clk);
2990  	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2991  err:
2992  	return ret;
2993  }
2994  
2995  /*
2996   * Even if the driver is not modular, it makes sense to be able to
2997   * unbind a device: there can be many bound devices, and there are
2998   * situations where dynamic binding and unbinding can be useful.
2999   *
3000   * For example, a connected device can require a specific firmware update
3001   * protocol that needs bitbanging on IO lines, but use the regular serial
3002   * port in the normal case.
3003   */
3004  static int atmel_serial_remove(struct platform_device *pdev)
3005  {
3006  	struct uart_port *port = platform_get_drvdata(pdev);
3007  	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
3008  
3009  	tasklet_kill(&atmel_port->tasklet_rx);
3010  	tasklet_kill(&atmel_port->tasklet_tx);
3011  
3012  	device_init_wakeup(&pdev->dev, 0);
3013  
3014  	uart_remove_one_port(&atmel_uart, port);
3015  
3016  	kfree(atmel_port->rx_ring.buf);
3017  
3018  	/* "port" is allocated statically, so we shouldn't free it */
3019  
3020  	clear_bit(port->line, atmel_ports_in_use);
3021  
3022  	pdev->dev.of_node = NULL;
3023  
3024  	return 0;
3025  }
3026  
3027  static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
3028  			 atmel_serial_resume);
3029  
3030  static struct platform_driver atmel_serial_driver = {
3031  	.probe		= atmel_serial_probe,
3032  	.remove		= atmel_serial_remove,
3033  	.driver		= {
3034  		.name			= "atmel_usart_serial",
3035  		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
3036  		.pm			= pm_ptr(&atmel_serial_pm_ops),
3037  	},
3038  };
3039  
3040  static int __init atmel_serial_init(void)
3041  {
3042  	int ret;
3043  
3044  	ret = uart_register_driver(&atmel_uart);
3045  	if (ret)
3046  		return ret;
3047  
3048  	ret = platform_driver_register(&atmel_serial_driver);
3049  	if (ret)
3050  		uart_unregister_driver(&atmel_uart);
3051  
3052  	return ret;
3053  }
3054  device_initcall(atmel_serial_init);
3055