xref: /openbmc/linux/drivers/tty/serial/amba-pl011.c (revision 278002edb19bce2c628fafb0af936e77000f3a5b)
// SPDX-License-Identifier: GPL-2.0+
/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
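
/*
 * Layout of a DR read (per the PL011 TRM): the received character is in
 * bits [7:0] and the per-character error flags (FE/PE/BE/OE) in bits
 * [11:8].  UART_DUMMY_DR_RX sits above the hardware field and is ORed
 * into every character read so that masking against read_status_mask can
 * discard characters entirely (see pl011_fifo_to_tty()).  For example,
 * an 'A' (0x41) received with a framing error reads back as
 * 0x141 | UART_DUMMY_DR_RX = 0x10141.
 */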

enum {
	REG_DR,
	REG_ST_DMAWM,
	REG_ST_TIMEOUT,
	REG_FR,
	REG_LCRH_RX,
	REG_LCRH_TX,
	REG_IBRD,
	REG_FBRD,
	REG_CR,
	REG_IFLS,
	REG_IMSC,
	REG_RIS,
	REG_MIS,
	REG_ICR,
	REG_DMACR,
	REG_ST_XFCR,
	REG_ST_XON1,
	REG_ST_XON2,
	REG_ST_XOFF1,
	REG_ST_XOFF2,
	REG_ST_ITCR,
	REG_ST_ITIP,
	REG_ST_ABCR,
	REG_ST_ABIMSC,

	/* The size of the array - must be last */
	REG_ARRAY_SIZE,
};

static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};

static const struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};

#ifdef CONFIG_ACPI_SPCR_TABLE
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#endif

static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};

/* Deals with DMA transactions */

struct pl011_dmabuf {
	dma_addr_t		dma;
	size_t			len;
	char			*buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_dmabuf	dbuf_a;
	struct pl011_dmabuf	dbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	dma_addr_t		dma;
	size_t			len;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
	bool			rs485_tx_started;
	unsigned int		rs485_tx_drain_interval; /* usecs */
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};
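
/*
 * A note on the pattern used throughout this driver: uart_port is embedded
 * as the first member above, so any uart_port callback can recover the
 * wrapping uart_amba_port with container_of().  A minimal sketch
 * (example_op is illustrative only, not part of the driver):
 *
 *	static void example_op(struct uart_port *port)
 *	{
 *		struct uart_amba_port *uap =
 *		    container_of(port, struct uart_amba_port, port);
 *		...
 *	}
 */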

static unsigned int pl011_tx_empty(struct uart_port *port);

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}

static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
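
/*
 * These accessors are used together with software shadows of the writable
 * registers (uap->im for IMSC, uap->dmacr for DMACR): update the shadow,
 * then write it back in full, avoiding a read-back of the hardware
 * register.  A minimal sketch of masking the TX interrupt, as done in
 * several places below:
 *
 *	uap->im &= ~UART011_TXIM;
 *	pl011_write(uap->im, uap, REG_IMSC);
 */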

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, fifotaken;
	int sysrq;
	u16 status;
	u8 flag;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		uart_port_unlock(&uap->port);
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
		uart_port_lock(&uap->port);

		if (!sysrq)
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
	enum dma_data_direction dir)
{
	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
				     &db->dma, GFP_KERNEL);
	if (!db->buf)
		return -ENOMEM;
	db->len = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
	enum dma_data_direction dir)
{
	if (db->buf) {
		dma_free_coherent(chan->device->dev,
				  PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
	}
}

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing;
		 * otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * The poll rate defaults to 100 ms if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* poll_timeout defaults to 3 secs if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
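
/*
 * For reference, a hypothetical device-tree fragment using the polling
 * properties parsed above (node name and DMA channel specifiers are
 * illustrative only):
 *
 *	uart0: serial@10009000 {
 *		compatible = "arm,pl011", "arm,primecell";
 *		dmas = <&dma 13>, <&dma 14>;
 *		dma-names = "rx", "tx";
 *		auto-poll;
 *		poll-rate-ms = <100>;
 *		poll-timeout-ms = <3000>;
 *	};
 */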

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->dmatx.queued)
		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
				dmatx->len, DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		uart_port_unlock_irqrestore(&uap->port, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	uart_port_unlock_irqrestore(&uap->port, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}
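
	/*
	 * Worked example of the wrap-around copy above (numbers are
	 * illustrative): with UART_XMIT_SIZE = 4096, tail = 4090 and
	 * head = 10, 16 chars are pending and count = 15 are sent (the
	 * last char is held back, see the bodge above); first =
	 * 4096 - 4090 = 6 bytes come from the end of the ring, then
	 * second = 15 - 6 = 9 bytes from its start.
	 */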

	dmatx->len = count;
	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
				    DMA_TO_DEVICE);
	if (dmatx->dma == DMA_MAPPING_ERROR) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	uart_xmit_advance(&uap->port, count);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/XOFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
				 uap->dmatx.len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_dmabuf *dbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	dbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmabuf *dbuf = use_buf_b ?
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = dbuf->len - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}
	/* Pick up the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = dbuf->len;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = dbuf->len - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	uart_port_lock_irq(&uap->port);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = dbuf->len - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	uart_port_unlock_irq(&uap->port);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	if (!uap->using_rx_dma)
		return;

	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On each poll it checks the residue in the DMA buffer and transfers
 * data to the tty. It also updates last_residue for the next poll.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_dmabuf *dbuf;
	int dma_count;
	struct dma_tx_state state;

	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = dbuf->len - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		uart_port_lock_irqsave(&uap->port, &flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		uart_port_unlock_irqrestore(&uap->port, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	uap->dmatx.len = PL011_DMA_BUFFER_SIZE;

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have some specific DMA burst threshold
	 * compensation. Set this to 16 bytes, so bursts will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	uart_port_lock_irq(&uap->port);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uart_port_unlock_irq(&uap->port);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_single(uap->dmatx.chan->device->dev,
					 uap->dmatx.dma, uap->dmatx.len,
					 DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
	/*
	 * To be on the safe side only time out after twice as many iterations
	 * as fifo size.
	 */
	const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
	struct uart_port *port = &uap->port;
	int i = 0;
	u32 cr;

	/* Wait until hardware tx queue is empty */
	while (!pl011_tx_empty(port)) {
		if (i > MAX_TX_DRAIN_ITERS) {
			dev_warn(port->dev,
				 "timeout while draining hardware tx queue\n");
			break;
		}

		udelay(uap->rs485_tx_drain_interval);
		i++;
	}

	if (port->rs485.delay_rts_after_send)
		mdelay(port->rs485.delay_rts_after_send);

	cr = pl011_read(uap, REG_CR);

	if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	/* Disable the transmitter and reenable the transceiver */
	cr &= ~UART011_CR_TXE;
	cr |= UART011_CR_RXE;
	pl011_write(cr, uap, REG_CR);

	uap->rs485_tx_started = false;
}

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);

	if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
		pl011_rs485_tx_stop(uap);
}

static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_rs485_tx_start(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	u32 cr;

	/* Enable transmitter */
	cr = pl011_read(uap, REG_CR);
	cr |= UART011_CR_TXE;

	/* Disable receiver if half-duplex */
	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
		cr &= ~UART011_CR_RXE;

	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	pl011_write(cr, uap, REG_CR);

	if (port->rs485.delay_rts_before_send)
		mdelay(port->rs485.delay_rts_before_send);

	uap->rs485_tx_started = true;
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
	    !uap->rs485_tx_started)
		pl011_rs485_tx_start(uap);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_throttle_rx(struct uart_port *port)
{
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	pl011_stop_rx(port);
	uart_port_unlock_irqrestore(port, flags);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	uart_port_unlock(&uap->port);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job, fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	uart_port_lock(&uap->port);
}

static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}

/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	if (!uap->vendor->cts_event_workaround)
		return;

	/* Workaround to make sure that all bits are unlocked. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce a 26ns (1 UART clk) delay before W1C;
	 * a single APB access incurs a 2 pclk (133.12MHz) delay,
	 * so add 2 dummy reads.
	 */
	pl011_read(uap, REG_ICR);
	pl011_read(uap, REG_ICR);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	uart_port_lock_irqsave(&uap->port, &flags);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	uart_port_unlock_irqrestore(&uap->port, flags);

	return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}
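
/*
 * Worked example of the inv_fr XOR above: on the Qualcomm QDF2400 Erratum
 * 44 variant, fr_busy is redefined to UART011_FR_TXFE and inv_fr is set to
 * the same bit, so "TX FIFO empty" (bit set in hardware) reads back as
 * "not busy" (bit clear after the XOR) and the busy test in
 * pl011_tx_empty() works unchanged for both standard and erratum parts.
 */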
1600  
pl011_get_mctrl(struct uart_port * port)1601  static unsigned int pl011_get_mctrl(struct uart_port *port)
1602  {
1603  	struct uart_amba_port *uap =
1604  	    container_of(port, struct uart_amba_port, port);
1605  	unsigned int result = 0;
1606  	unsigned int status = pl011_read(uap, REG_FR);
1607  
1608  #define TIOCMBIT(uartbit, tiocmbit)	\
1609  	if (status & uartbit)		\
1610  		result |= tiocmbit
1611  
1612  	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1613  	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
1614  	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
1615  	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
1616  #undef TIOCMBIT
1617  	return result;
1618  }
1619  
pl011_set_mctrl(struct uart_port * port,unsigned int mctrl)1620  static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1621  {
1622  	struct uart_amba_port *uap =
1623  	    container_of(port, struct uart_amba_port, port);
1624  	unsigned int cr;
1625  
1626  	cr = pl011_read(uap, REG_CR);
1627  
1628  #define	TIOCMBIT(tiocmbit, uartbit)		\
1629  	if (mctrl & tiocmbit)		\
1630  		cr |= uartbit;		\
1631  	else				\
1632  		cr &= ~uartbit
1633  
1634  	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1635  	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1636  	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1637  	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1638  	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1639  
1640  	if (port->status & UPSTAT_AUTORTS) {
1641  		/* We need to disable auto-RTS if we want to turn RTS off */
1642  		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1643  	}
1644  #undef TIOCMBIT
1645  
1646  	pl011_write(cr, uap, REG_CR);
1647  }
1648  
pl011_break_ctl(struct uart_port * port,int break_state)1649  static void pl011_break_ctl(struct uart_port *port, int break_state)
1650  {
1651  	struct uart_amba_port *uap =
1652  	    container_of(port, struct uart_amba_port, port);
1653  	unsigned long flags;
1654  	unsigned int lcr_h;
1655  
1656  	uart_port_lock_irqsave(&uap->port, &flags);
1657  	lcr_h = pl011_read(uap, REG_LCRH_TX);
1658  	if (break_state == -1)
1659  		lcr_h |= UART01x_LCRH_BRK;
1660  	else
1661  		lcr_h &= ~UART01x_LCRH_BRK;
1662  	pl011_write(lcr_h, uap, REG_LCRH_TX);
1663  	uart_port_unlock_irqrestore(&uap->port, flags);
1664  }
1665  
1666  #ifdef CONFIG_CONSOLE_POLL
1667  
pl011_quiesce_irqs(struct uart_port * port)1668  static void pl011_quiesce_irqs(struct uart_port *port)
1669  {
1670  	struct uart_amba_port *uap =
1671  	    container_of(port, struct uart_amba_port, port);
1672  
1673  	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
1674  	/*
1675  	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
1676  	 * we simply mask it. start_tx() will unmask it.
1677  	 *
1678  	 * Note we can race with start_tx(), and if the race happens, the
1679  	 * polling user might get another interrupt just after we clear it.
1680  	 * But it should be OK and can happen even w/o the race, e.g.
1681  	 * controller immediately got some new data and raised the IRQ.
1682  	 *
1683  	 * And whoever uses polling routines assumes that it manages the device
1684  	 * (including tx queue), so we're also fine with start_tx()'s caller
1685  	 * side.
1686  	 */
1687  	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
1688  		    REG_IMSC);
1689  }
1690  
pl011_get_poll_char(struct uart_port * port)1691  static int pl011_get_poll_char(struct uart_port *port)
1692  {
1693  	struct uart_amba_port *uap =
1694  	    container_of(port, struct uart_amba_port, port);
1695  	unsigned int status;
1696  
1697  	/*
1698  	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
1699  	 * debugger.
1700  	 */
1701  	pl011_quiesce_irqs(port);
1702  
1703  	status = pl011_read(uap, REG_FR);
1704  	if (status & UART01x_FR_RXFE)
1705  		return NO_POLL_CHAR;
1706  
1707  	return pl011_read(uap, REG_DR);
1708  }
1709  
pl011_put_poll_char(struct uart_port * port,unsigned char ch)1710  static void pl011_put_poll_char(struct uart_port *port,
1711  			 unsigned char ch)
1712  {
1713  	struct uart_amba_port *uap =
1714  	    container_of(port, struct uart_amba_port, port);
1715  
1716  	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1717  		cpu_relax();
1718  
1719  	pl011_write(ch, uap, REG_DR);
1720  }
1721  
1722  #endif /* CONFIG_CONSOLE_POLL */
1723  
1724  static int pl011_hwinit(struct uart_port *port)
1725  {
1726  	struct uart_amba_port *uap =
1727  	    container_of(port, struct uart_amba_port, port);
1728  	int retval;
1729  
1730  	/* Optionally enable pins to be muxed in and configured */
1731  	pinctrl_pm_select_default_state(port->dev);
1732  
1733  	/*
1734  	 * Try to enable the clock producer.
1735  	 */
1736  	retval = clk_prepare_enable(uap->clk);
1737  	if (retval)
1738  		return retval;
1739  
1740  	uap->port.uartclk = clk_get_rate(uap->clk);
1741  
1742  	/* Clear pending error and receive interrupts */
1743  	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
1744  		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
1745  		    uap, REG_ICR);
1746  
1747  	/*
1748  	 * Save the interrupt enable mask, and enable RX interrupts in case
1749  	 * the interrupt is used for NMI entry.
1750  	 */
1751  	uap->im = pl011_read(uap, REG_IMSC);
1752  	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
1753  
1754  	if (dev_get_platdata(uap->port.dev)) {
1755  		struct amba_pl011_data *plat;
1756  
1757  		plat = dev_get_platdata(uap->port.dev);
1758  		if (plat->init)
1759  			plat->init();
1760  	}
1761  	return 0;
1762  }
1763  
1764  static bool pl011_split_lcrh(const struct uart_amba_port *uap)
1765  {
1766  	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
1767  	       pl011_reg_to_offset(uap, REG_LCRH_TX);
1768  }
1769  
1770  static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1771  {
1772  	pl011_write(lcr_h, uap, REG_LCRH_RX);
1773  	if (pl011_split_lcrh(uap)) {
1774  		int i;
1775  		/*
1776  		 * Wait 10 PCLKs before writing the LCRH_TX register;
1777  		 * to get this delay, write a read-only register 10 times.
1778  		 */
1779  		for (i = 0; i < 10; ++i)
1780  			pl011_write(0xff, uap, REG_MIS);
1781  		pl011_write(lcr_h, uap, REG_LCRH_TX);
1782  	}
1783  }
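/*
 * Editor's note (added for illustration): REG_MIS is a read-only register,
 * so the 0xff value written above is discarded by the hardware; each write
 * still occupies the bus for at least one PCLK cycle, which is how ten
 * dummy writes approximate the required ten-PCLK gap between the two LCRH
 * writes on split-LCRH (ST) variants.
 */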
1784  
1785  static int pl011_allocate_irq(struct uart_amba_port *uap)
1786  {
1787  	pl011_write(uap->im, uap, REG_IMSC);
1788  
1789  	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
1790  }
1791  
1792  /*
1793   * Enable interrupts: only the timeout interrupt when using DMA;
1794   * if the initial RX DMA job failed, start in interrupt mode
1795   * as well.
1796   */
1797  static void pl011_enable_interrupts(struct uart_amba_port *uap)
1798  {
1799  	unsigned long flags;
1800  	unsigned int i;
1801  
1802  	uart_port_lock_irqsave(&uap->port, &flags);
1803  
1804  	/* Clear out any spuriously appearing RX interrupts */
1805  	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1806  
1807  	/*
1808  	 * RXIS is asserted only when the RX FIFO transitions from below
1809  	 * to above the trigger threshold.  If the RX FIFO is already
1810  	 * full to the threshold this can't happen and RXIS will now be
1811  	 * stuck off.  Drain the RX FIFO explicitly to fix this:
1812  	 */
1813  	for (i = 0; i < uap->fifosize * 2; ++i) {
1814  		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
1815  			break;
1816  
1817  		pl011_read(uap, REG_DR);
1818  	}
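	/*
	 * Editor's note (added for illustration): the 2 * fifosize bound
	 * keeps this drain finite even if characters keep arriving while
	 * the loop runs; once the FIFO reads empty, the break above exits
	 * early.
	 */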
1819  
1820  	uap->im = UART011_RTIM;
1821  	if (!pl011_dma_rx_running(uap))
1822  		uap->im |= UART011_RXIM;
1823  	pl011_write(uap->im, uap, REG_IMSC);
1824  	uart_port_unlock_irqrestore(&uap->port, flags);
1825  }
1826  
1827  static void pl011_unthrottle_rx(struct uart_port *port)
1828  {
1829  	struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
1830  	unsigned long flags;
1831  
1832  	uart_port_lock_irqsave(&uap->port, &flags);
1833  
1834  	uap->im = UART011_RTIM;
1835  	if (!pl011_dma_rx_running(uap))
1836  		uap->im |= UART011_RXIM;
1837  
1838  	pl011_write(uap->im, uap, REG_IMSC);
1839  
1840  #ifdef CONFIG_DMA_ENGINE
1841  	if (uap->using_rx_dma) {
1842  		uap->dmacr |= UART011_RXDMAE;
1843  		pl011_write(uap->dmacr, uap, REG_DMACR);
1844  	}
1845  #endif
1846  
1847  	uart_port_unlock_irqrestore(&uap->port, flags);
1848  }
1849  
1850  static int pl011_startup(struct uart_port *port)
1851  {
1852  	struct uart_amba_port *uap =
1853  	    container_of(port, struct uart_amba_port, port);
1854  	unsigned int cr;
1855  	int retval;
1856  
1857  	retval = pl011_hwinit(port);
1858  	if (retval)
1859  		return retval;
1860  
1861  	retval = pl011_allocate_irq(uap);
1862  	if (retval)
1863  		goto clk_dis;
1864  
1865  	pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1866  
1867  	uart_port_lock_irq(&uap->port);
1868  
1869  	cr = pl011_read(uap, REG_CR);
1870  	cr &= UART011_CR_RTS | UART011_CR_DTR;
1871  	cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
1872  
1873  	if (!(port->rs485.flags & SER_RS485_ENABLED))
1874  		cr |= UART011_CR_TXE;
1875  
1876  	pl011_write(cr, uap, REG_CR);
1877  
1878  	uart_port_unlock_irq(&uap->port);
1879  
1880  	/*
1881  	 * initialise the old status of the modem signals
1882  	 */
1883  	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1884  
1885  	/* Startup DMA */
1886  	pl011_dma_startup(uap);
1887  
1888  	pl011_enable_interrupts(uap);
1889  
1890  	return 0;
1891  
1892   clk_dis:
1893  	clk_disable_unprepare(uap->clk);
1894  	return retval;
1895  }
1896  
1897  static int sbsa_uart_startup(struct uart_port *port)
1898  {
1899  	struct uart_amba_port *uap =
1900  		container_of(port, struct uart_amba_port, port);
1901  	int retval;
1902  
1903  	retval = pl011_hwinit(port);
1904  	if (retval)
1905  		return retval;
1906  
1907  	retval = pl011_allocate_irq(uap);
1908  	if (retval)
1909  		return retval;
1910  
1911  	/* The SBSA UART does not support any modem status lines. */
1912  	uap->old_status = 0;
1913  
1914  	pl011_enable_interrupts(uap);
1915  
1916  	return 0;
1917  }
1918  
1919  static void pl011_shutdown_channel(struct uart_amba_port *uap,
1920  					unsigned int lcrh)
1921  {
1922  	unsigned long val;
1923  
1924  	val = pl011_read(uap, lcrh);
1925  	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1926  	pl011_write(val, uap, lcrh);
1927  }
1928  
1929  /*
1930   * Disable the port. It must not disable RTS and DTR;
1931   * their state is preserved so that it can be restored
1932   * during startup().
1933   */
1934  static void pl011_disable_uart(struct uart_amba_port *uap)
1935  {
1936  	unsigned int cr;
1937  
1938  	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1939  	uart_port_lock_irq(&uap->port);
1940  	cr = pl011_read(uap, REG_CR);
1941  	cr &= UART011_CR_RTS | UART011_CR_DTR;
1942  	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1943  	pl011_write(cr, uap, REG_CR);
1944  	uart_port_unlock_irq(&uap->port);
1945  
1946  	/*
1947  	 * disable break condition and fifos
1948  	 */
1949  	pl011_shutdown_channel(uap, REG_LCRH_RX);
1950  	if (pl011_split_lcrh(uap))
1951  		pl011_shutdown_channel(uap, REG_LCRH_TX);
1952  }
1953  
1954  static void pl011_disable_interrupts(struct uart_amba_port *uap)
1955  {
1956  	uart_port_lock_irq(&uap->port);
1957  
1958  	/* mask all interrupts and clear all pending ones */
1959  	uap->im = 0;
1960  	pl011_write(uap->im, uap, REG_IMSC);
1961  	pl011_write(0xffff, uap, REG_ICR);
1962  
1963  	uart_port_unlock_irq(&uap->port);
1964  }
1965  
1966  static void pl011_shutdown(struct uart_port *port)
1967  {
1968  	struct uart_amba_port *uap =
1969  		container_of(port, struct uart_amba_port, port);
1970  
1971  	pl011_disable_interrupts(uap);
1972  
1973  	pl011_dma_shutdown(uap);
1974  
1975  	if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
1976  		pl011_rs485_tx_stop(uap);
1977  
1978  	free_irq(uap->port.irq, uap);
1979  
1980  	pl011_disable_uart(uap);
1981  
1982  	/*
1983  	 * Shut down the clock producer
1984  	 */
1985  	clk_disable_unprepare(uap->clk);
1986  	/* Optionally let pins go into sleep states */
1987  	pinctrl_pm_select_sleep_state(port->dev);
1988  
1989  	if (dev_get_platdata(uap->port.dev)) {
1990  		struct amba_pl011_data *plat;
1991  
1992  		plat = dev_get_platdata(uap->port.dev);
1993  		if (plat->exit)
1994  			plat->exit();
1995  	}
1996  
1997  	if (uap->port.ops->flush_buffer)
1998  		uap->port.ops->flush_buffer(port);
1999  }
2000  
2001  static void sbsa_uart_shutdown(struct uart_port *port)
2002  {
2003  	struct uart_amba_port *uap =
2004  		container_of(port, struct uart_amba_port, port);
2005  
2006  	pl011_disable_interrupts(uap);
2007  
2008  	free_irq(uap->port.irq, uap);
2009  
2010  	if (uap->port.ops->flush_buffer)
2011  		uap->port.ops->flush_buffer(port);
2012  }
2013  
2014  static void
2015  pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
2016  {
2017  	port->read_status_mask = UART011_DR_OE | 255;
2018  	if (termios->c_iflag & INPCK)
2019  		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
2020  	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2021  		port->read_status_mask |= UART011_DR_BE;
2022  
2023  	/*
2024  	 * Characters to ignore
2025  	 */
2026  	port->ignore_status_mask = 0;
2027  	if (termios->c_iflag & IGNPAR)
2028  		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
2029  	if (termios->c_iflag & IGNBRK) {
2030  		port->ignore_status_mask |= UART011_DR_BE;
2031  		/*
2032  		 * If we're ignoring parity and break indicators,
2033  		 * ignore overruns too (for real raw support).
2034  		 */
2035  		if (termios->c_iflag & IGNPAR)
2036  			port->ignore_status_mask |= UART011_DR_OE;
2037  	}
2038  
2039  	/*
2040  	 * Ignore all characters if CREAD is not set.
2041  	 */
2042  	if ((termios->c_cflag & CREAD) == 0)
2043  		port->ignore_status_mask |= UART_DUMMY_DR_RX;
2044  }
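/*
 * Editor's sketch of how the masks above take effect (assumed from the RX
 * path earlier in this file, not a new code path): the receive code ANDs
 * each character's DR status bits with read_status_mask before classifying
 * the error, and uart_insert_char() discards characters whose status
 * matches ignore_status_mask. So with INPCK set, a framing error is
 * reported as TTY_FRAME, while with IGNPAR also set the offending
 * character is silently dropped.
 */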
2045  
2046  static void
2047  pl011_set_termios(struct uart_port *port, struct ktermios *termios,
2048  		  const struct ktermios *old)
2049  {
2050  	struct uart_amba_port *uap =
2051  	    container_of(port, struct uart_amba_port, port);
2052  	unsigned int lcr_h, old_cr;
2053  	unsigned long flags;
2054  	unsigned int baud, quot, clkdiv;
2055  	unsigned int bits;
2056  
2057  	if (uap->vendor->oversampling)
2058  		clkdiv = 8;
2059  	else
2060  		clkdiv = 16;
2061  
2062  	/*
2063  	 * Ask the core to calculate the divisor for us.
2064  	 */
2065  	baud = uart_get_baud_rate(port, termios, old, 0,
2066  				  port->uartclk / clkdiv);
2067  #ifdef CONFIG_DMA_ENGINE
2068  	/*
2069  	 * Adjust RX DMA polling rate with baud rate if not specified.
2070  	 */
2071  	if (uap->dmarx.auto_poll_rate)
2072  		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
2073  #endif
2074  
2075  	if (baud > port->uartclk/16)
2076  		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
2077  	else
2078  		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
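	/*
	 * Worked example (added for illustration): with uartclk = 24 MHz
	 * and baud = 115200, quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200)
	 * = 833, later split into IBRD = 833 >> 6 = 13 and
	 * FBRD = 833 & 0x3f = 1, matching the PL011's 16.6 fixed-point
	 * divider (24e6 / (16 * 115200) ~= 13.02).
	 */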
2079  
2080  	switch (termios->c_cflag & CSIZE) {
2081  	case CS5:
2082  		lcr_h = UART01x_LCRH_WLEN_5;
2083  		break;
2084  	case CS6:
2085  		lcr_h = UART01x_LCRH_WLEN_6;
2086  		break;
2087  	case CS7:
2088  		lcr_h = UART01x_LCRH_WLEN_7;
2089  		break;
2090  	default: // CS8
2091  		lcr_h = UART01x_LCRH_WLEN_8;
2092  		break;
2093  	}
2094  	if (termios->c_cflag & CSTOPB)
2095  		lcr_h |= UART01x_LCRH_STP2;
2096  	if (termios->c_cflag & PARENB) {
2097  		lcr_h |= UART01x_LCRH_PEN;
2098  		if (!(termios->c_cflag & PARODD))
2099  			lcr_h |= UART01x_LCRH_EPS;
2100  		if (termios->c_cflag & CMSPAR)
2101  			lcr_h |= UART011_LCRH_SPS;
2102  	}
2103  	if (uap->fifosize > 1)
2104  		lcr_h |= UART01x_LCRH_FEN;
2105  
2106  	bits = tty_get_frame_size(termios->c_cflag);
2107  
2108  	uart_port_lock_irqsave(port, &flags);
2109  
2110  	/*
2111  	 * Update the per-port timeout.
2112  	 */
2113  	uart_update_timeout(port, termios->c_cflag, baud);
2114  
2115  	/*
2116  	 * Calculate the approximate time it takes to transmit one character
2117  	 * at the given baud rate. We use this as the poll interval when we
2118  	 * wait for the tx queue to empty.
2119  	 */
2120  	uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
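	/*
	 * Worked example (added for illustration): for 8N1 framing
	 * tty_get_frame_size() yields 10 bits (start + 8 data + stop), so
	 * at 115200 baud the drain interval is
	 * DIV_ROUND_UP(10 * 1000000, 115200) = 87 us per character.
	 */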
2121  
2122  	pl011_setup_status_masks(port, termios);
2123  
2124  	if (UART_ENABLE_MS(port, termios->c_cflag))
2125  		pl011_enable_ms(port);
2126  
2127  	if (port->rs485.flags & SER_RS485_ENABLED)
2128  		termios->c_cflag &= ~CRTSCTS;
2129  
2130  	old_cr = pl011_read(uap, REG_CR);
2131  
2132  	if (termios->c_cflag & CRTSCTS) {
2133  		if (old_cr & UART011_CR_RTS)
2134  			old_cr |= UART011_CR_RTSEN;
2135  
2136  		old_cr |= UART011_CR_CTSEN;
2137  		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
2138  	} else {
2139  		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
2140  		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
2141  	}
2142  
2143  	if (uap->vendor->oversampling) {
2144  		if (baud > port->uartclk / 16)
2145  			old_cr |= ST_UART011_CR_OVSFACT;
2146  		else
2147  			old_cr &= ~ST_UART011_CR_OVSFACT;
2148  	}
2149  
2150  	/*
2151  	 * Workaround for the ST Micro oversampling variants to
2152  	 * increase the bitrate slightly, by lowering the divisor,
2153  	 * to avoid delayed sampling of start bit at high speeds,
2154  	 * else we see data corruption.
2155  	 */
2156  	if (uap->vendor->oversampling) {
2157  		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
2158  			quot -= 1;
2159  		else if ((baud > 3250000) && (quot > 2))
2160  			quot -= 2;
2161  	}
2162  	/* Set baud rate */
2163  	pl011_write(quot & 0x3f, uap, REG_FBRD);
2164  	pl011_write(quot >> 6, uap, REG_IBRD);
2165  
2166  	/*
2167  	 * ----------v----------v----------v----------v-----
2168  	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
2169  	 * REG_FBRD & REG_IBRD.
2170  	 * ----------^----------^----------^----------^-----
2171  	 */
2172  	pl011_write_lcr_h(uap, lcr_h);
2173  
2174  	/*
2175  	 * Receive was disabled by pl011_disable_uart during shutdown.
2176  	 * Receive must be re-enabled if the tty_driver returned by
2177  	 * tty_find_polling_driver() is to be used after a port shutdown.
2178  	 */
2179  	old_cr |= UART011_CR_RXE;
2180  	pl011_write(old_cr, uap, REG_CR);
2181  
2182  	uart_port_unlock_irqrestore(port, flags);
2183  }
2184  
2185  static void
2186  sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
2187  		      const struct ktermios *old)
2188  {
2189  	struct uart_amba_port *uap =
2190  	    container_of(port, struct uart_amba_port, port);
2191  	unsigned long flags;
2192  
2193  	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
2194  
2195  	/* The SBSA UART only supports 8n1 without hardware flow control. */
2196  	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
2197  	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2198  	termios->c_cflag |= CS8 | CLOCAL;
2199  
2200  	uart_port_lock_irqsave(port, &flags);
2201  	uart_update_timeout(port, CS8, uap->fixed_baud);
2202  	pl011_setup_status_masks(port, termios);
2203  	uart_port_unlock_irqrestore(port, flags);
2204  }
2205  
2206  static const char *pl011_type(struct uart_port *port)
2207  {
2208  	struct uart_amba_port *uap =
2209  	    container_of(port, struct uart_amba_port, port);
2210  	return uap->port.type == PORT_AMBA ? uap->type : NULL;
2211  }
2212  
2213  /*
2214   * Configure/autoconfigure the port.
2215   */
2216  static void pl011_config_port(struct uart_port *port, int flags)
2217  {
2218  	if (flags & UART_CONFIG_TYPE)
2219  		port->type = PORT_AMBA;
2220  }
2221  
2222  /*
2223   * verify the new serial_struct (for TIOCSSERIAL).
2224   */
2225  static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
2226  {
2227  	int ret = 0;
2228  	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
2229  		ret = -EINVAL;
2230  	if (ser->irq < 0 || ser->irq >= nr_irqs)
2231  		ret = -EINVAL;
2232  	if (ser->baud_base < 9600)
2233  		ret = -EINVAL;
2234  	if (port->mapbase != (unsigned long) ser->iomem_base)
2235  		ret = -EINVAL;
2236  	return ret;
2237  }
2238  
2239  static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
2240  			      struct serial_rs485 *rs485)
2241  {
2242  	struct uart_amba_port *uap =
2243  		container_of(port, struct uart_amba_port, port);
2244  
2245  	if (port->rs485.flags & SER_RS485_ENABLED)
2246  		pl011_rs485_tx_stop(uap);
2247  
2248  	/* Make sure auto RTS is disabled */
2249  	if (rs485->flags & SER_RS485_ENABLED) {
2250  		u32 cr = pl011_read(uap, REG_CR);
2251  
2252  		cr &= ~UART011_CR_RTSEN;
2253  		pl011_write(cr, uap, REG_CR);
2254  		port->status &= ~UPSTAT_AUTORTS;
2255  	}
2256  
2257  	return 0;
2258  }
2259  
2260  static const struct uart_ops amba_pl011_pops = {
2261  	.tx_empty	= pl011_tx_empty,
2262  	.set_mctrl	= pl011_set_mctrl,
2263  	.get_mctrl	= pl011_get_mctrl,
2264  	.stop_tx	= pl011_stop_tx,
2265  	.start_tx	= pl011_start_tx,
2266  	.stop_rx	= pl011_stop_rx,
2267  	.throttle	= pl011_throttle_rx,
2268  	.unthrottle	= pl011_unthrottle_rx,
2269  	.enable_ms	= pl011_enable_ms,
2270  	.break_ctl	= pl011_break_ctl,
2271  	.startup	= pl011_startup,
2272  	.shutdown	= pl011_shutdown,
2273  	.flush_buffer	= pl011_dma_flush_buffer,
2274  	.set_termios	= pl011_set_termios,
2275  	.type		= pl011_type,
2276  	.config_port	= pl011_config_port,
2277  	.verify_port	= pl011_verify_port,
2278  #ifdef CONFIG_CONSOLE_POLL
2279  	.poll_init     = pl011_hwinit,
2280  	.poll_get_char = pl011_get_poll_char,
2281  	.poll_put_char = pl011_put_poll_char,
2282  #endif
2283  };
2284  
2285  static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
2286  {
2287  }
2288  
2289  static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
2290  {
2291  	return 0;
2292  }
2293  
2294  static const struct uart_ops sbsa_uart_pops = {
2295  	.tx_empty	= pl011_tx_empty,
2296  	.set_mctrl	= sbsa_uart_set_mctrl,
2297  	.get_mctrl	= sbsa_uart_get_mctrl,
2298  	.stop_tx	= pl011_stop_tx,
2299  	.start_tx	= pl011_start_tx,
2300  	.stop_rx	= pl011_stop_rx,
2301  	.startup	= sbsa_uart_startup,
2302  	.shutdown	= sbsa_uart_shutdown,
2303  	.set_termios	= sbsa_uart_set_termios,
2304  	.type		= pl011_type,
2305  	.config_port	= pl011_config_port,
2306  	.verify_port	= pl011_verify_port,
2307  #ifdef CONFIG_CONSOLE_POLL
2308  	.poll_init     = pl011_hwinit,
2309  	.poll_get_char = pl011_get_poll_char,
2310  	.poll_put_char = pl011_put_poll_char,
2311  #endif
2312  };
2313  
2314  static struct uart_amba_port *amba_ports[UART_NR];
2315  
2316  #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
2317  
2318  static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
2319  {
2320  	struct uart_amba_port *uap =
2321  	    container_of(port, struct uart_amba_port, port);
2322  
2323  	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
2324  		cpu_relax();
2325  	pl011_write(ch, uap, REG_DR);
2326  }
2327  
2328  static void
2329  pl011_console_write(struct console *co, const char *s, unsigned int count)
2330  {
2331  	struct uart_amba_port *uap = amba_ports[co->index];
2332  	unsigned int old_cr = 0, new_cr;
2333  	unsigned long flags;
2334  	int locked = 1;
2335  
2336  	clk_enable(uap->clk);
2337  
2338  	local_irq_save(flags);
2339  	if (uap->port.sysrq)
2340  		locked = 0;
2341  	else if (oops_in_progress)
2342  		locked = uart_port_trylock(&uap->port);
2343  	else
2344  		uart_port_lock(&uap->port);
2345  
2346  	/*
2347  	 *	First save the CR, then disable the interrupts.
2348  	 */
2349  	if (!uap->vendor->always_enabled) {
2350  		old_cr = pl011_read(uap, REG_CR);
2351  		new_cr = old_cr & ~UART011_CR_CTSEN;
2352  		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
2353  		pl011_write(new_cr, uap, REG_CR);
2354  	}
2355  
2356  	uart_console_write(&uap->port, s, count, pl011_console_putchar);
2357  
2358  	/*
2359  	 *	Finally, wait for the transmitter to become empty and restore
2360  	 *	the CR. Allow flag register bits to be inverted to work around
2361  	 *	errata.
2362  	 */
2363  	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
2364  						& uap->vendor->fr_busy)
2365  		cpu_relax();
2366  	if (!uap->vendor->always_enabled)
2367  		pl011_write(old_cr, uap, REG_CR);
2368  
2369  	if (locked)
2370  		uart_port_unlock(&uap->port);
2371  	local_irq_restore(flags);
2372  
2373  	clk_disable(uap->clk);
2374  }
2375  
2376  static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
2377  				      int *parity, int *bits)
2378  {
2379  	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
2380  		unsigned int lcr_h, ibrd, fbrd;
2381  
2382  		lcr_h = pl011_read(uap, REG_LCRH_TX);
2383  
2384  		*parity = 'n';
2385  		if (lcr_h & UART01x_LCRH_PEN) {
2386  			if (lcr_h & UART01x_LCRH_EPS)
2387  				*parity = 'e';
2388  			else
2389  				*parity = 'o';
2390  		}
2391  
2392  		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
2393  			*bits = 7;
2394  		else
2395  			*bits = 8;
2396  
2397  		ibrd = pl011_read(uap, REG_IBRD);
2398  		fbrd = pl011_read(uap, REG_FBRD);
2399  
2400  		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
2401  
2402  		if (uap->vendor->oversampling) {
2403  			if (pl011_read(uap, REG_CR)
2404  				  & ST_UART011_CR_OVSFACT)
2405  				*baud *= 2;
2406  		}
2407  	}
2408  }
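/*
 * Worked example (added for illustration): reading back IBRD = 13 and
 * FBRD = 1 with uartclk = 24 MHz gives
 * baud = 24000000 * 4 / (64 * 13 + 1) = 96000000 / 833 ~= 115246, which
 * the termios code then snaps to the nearest standard rate, 115200.
 */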
2409  
2410  static int pl011_console_setup(struct console *co, char *options)
2411  {
2412  	struct uart_amba_port *uap;
2413  	int baud = 38400;
2414  	int bits = 8;
2415  	int parity = 'n';
2416  	int flow = 'n';
2417  	int ret;
2418  
2419  	/*
2420  	 * Check whether an invalid uart number has been specified, and
2421  	 * if so, search for the first available port that does have
2422  	 * console support.
2423  	 */
2424  	if (co->index >= UART_NR)
2425  		co->index = 0;
2426  	uap = amba_ports[co->index];
2427  	if (!uap)
2428  		return -ENODEV;
2429  
2430  	/* Allow pins to be muxed in and configured */
2431  	pinctrl_pm_select_default_state(uap->port.dev);
2432  
2433  	ret = clk_prepare(uap->clk);
2434  	if (ret)
2435  		return ret;
2436  
2437  	if (dev_get_platdata(uap->port.dev)) {
2438  		struct amba_pl011_data *plat;
2439  
2440  		plat = dev_get_platdata(uap->port.dev);
2441  		if (plat->init)
2442  			plat->init();
2443  	}
2444  
2445  	uap->port.uartclk = clk_get_rate(uap->clk);
2446  
2447  	if (uap->vendor->fixed_options) {
2448  		baud = uap->fixed_baud;
2449  	} else {
2450  		if (options)
2451  			uart_parse_options(options,
2452  					   &baud, &parity, &bits, &flow);
2453  		else
2454  			pl011_console_get_options(uap, &baud, &parity, &bits);
2455  	}
2456  
2457  	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
2458  }
2459  
2460  /**
2461   *	pl011_console_match - non-standard console matching
2462   *	@co:	  registering console
2463   *	@name:	  name from console command line
2464   *	@idx:	  index from console command line
2465   *	@options: ptr to option string from console command line
2466   *
2467   *	Only attempts to match console command lines of the form:
2468   *	    console=pl011,mmio|mmio32,<addr>[,<options>]
2469   *	    console=pl011,0x<addr>[,<options>]
2470   *	This form is used to register an initial earlycon boot console and
2471   *	replace it with the amba_console at pl011 driver init.
2472   *
2473   *	Performs console setup for a match (as required by interface)
2474   *	If no <options> are specified, then assume the h/w is already setup.
2475   *
2476   *	Returns 0 if console matches; otherwise non-zero to use default matching
2477   */
2478  static int pl011_console_match(struct console *co, char *name, int idx,
2479  			       char *options)
2480  {
2481  	unsigned char iotype;
2482  	resource_size_t addr;
2483  	int i;
2484  
2485  	/*
2486  	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
2487  	 * have a distinct console name, so make sure we check for that.
2488  	 * The actual implementation of the erratum occurs in the probe
2489  	 * function.
2490  	 */
2491  	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
2492  		return -ENODEV;
2493  
2494  	if (uart_parse_earlycon(options, &iotype, &addr, &options))
2495  		return -ENODEV;
2496  
2497  	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
2498  		return -ENODEV;
2499  
2500  	/* try to match the port specified on the command line */
2501  	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
2502  		struct uart_port *port;
2503  
2504  		if (!amba_ports[i])
2505  			continue;
2506  
2507  		port = &amba_ports[i]->port;
2508  
2509  		if (port->mapbase != addr)
2510  			continue;
2511  
2512  		co->index = i;
2513  		port->cons = co;
2514  		return pl011_console_setup(co, options);
2515  	}
2516  
2517  	return -ENODEV;
2518  }
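/*
 * Example command line (added for illustration; the address is
 * hypothetical):
 *
 *	console=pl011,mmio32,0x9000000,115200n8
 *
 * matches the registered port whose mapbase is 0x9000000 and passes
 * "115200n8" on to pl011_console_setup() as the options string.
 */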
2519  
2520  static struct uart_driver amba_reg;
2521  static struct console amba_console = {
2522  	.name		= "ttyAMA",
2523  	.write		= pl011_console_write,
2524  	.device		= uart_console_device,
2525  	.setup		= pl011_console_setup,
2526  	.match		= pl011_console_match,
2527  	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
2528  	.index		= -1,
2529  	.data		= &amba_reg,
2530  };
2531  
2532  #define AMBA_CONSOLE	(&amba_console)
2533  
2534  static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
2535  {
2536  	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2537  		cpu_relax();
2538  	writel(c, port->membase + UART01x_DR);
2539  	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
2540  		cpu_relax();
2541  }
2542  
2543  static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
2544  {
2545  	struct earlycon_device *dev = con->data;
2546  
2547  	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
2548  }
2549  
2550  static void pl011_putc(struct uart_port *port, unsigned char c)
2551  {
2552  	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2553  		cpu_relax();
2554  	if (port->iotype == UPIO_MEM32)
2555  		writel(c, port->membase + UART01x_DR);
2556  	else
2557  		writeb(c, port->membase + UART01x_DR);
2558  	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
2559  		cpu_relax();
2560  }
2561  
2562  static void pl011_early_write(struct console *con, const char *s, unsigned n)
2563  {
2564  	struct earlycon_device *dev = con->data;
2565  
2566  	uart_console_write(&dev->port, s, n, pl011_putc);
2567  }
2568  
2569  #ifdef CONFIG_CONSOLE_POLL
2570  static int pl011_getc(struct uart_port *port)
2571  {
2572  	if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
2573  		return NO_POLL_CHAR;
2574  
2575  	if (port->iotype == UPIO_MEM32)
2576  		return readl(port->membase + UART01x_DR);
2577  	else
2578  		return readb(port->membase + UART01x_DR);
2579  }
2580  
2581  static int pl011_early_read(struct console *con, char *s, unsigned int n)
2582  {
2583  	struct earlycon_device *dev = con->data;
2584  	int ch, num_read = 0;
2585  
2586  	while (num_read < n) {
2587  		ch = pl011_getc(&dev->port);
2588  		if (ch == NO_POLL_CHAR)
2589  			break;
2590  
2591  		s[num_read++] = ch;
2592  	}
2593  
2594  	return num_read;
2595  }
2596  #else
2597  #define pl011_early_read NULL
2598  #endif
2599  
2600  /*
2601   * On non-ACPI systems, earlycon is enabled by specifying
2602   * "earlycon=pl011,<address>" on the kernel command line.
2603   *
2604   * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
2605   * by specifying only "earlycon" on the command line.  Because it requires
2606   * SPCR, the console starts after ACPI is parsed, which is later than a
2607   * traditional early console.
2608   *
2609   * To get the traditional early console that starts before ACPI is parsed,
2610   * specify the full "earlycon=pl011,<address>" option.
2611   */
2612  static int __init pl011_early_console_setup(struct earlycon_device *device,
2613  					    const char *opt)
2614  {
2615  	if (!device->port.membase)
2616  		return -ENODEV;
2617  
2618  	device->con->write = pl011_early_write;
2619  	device->con->read = pl011_early_read;
2620  
2621  	return 0;
2622  }
2623  OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2624  OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2625  
2626  /*
2627   * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
2628   * Erratum 44, traditional earlycon can be enabled by specifying
2629   * "earlycon=qdf2400_e44,<address>".  Any options are ignored.
2630   *
2631   * Alternatively, you can just specify "earlycon", and the early console
2632   * will be enabled with the information from the SPCR table.  In this
2633   * case, the SPCR code will detect the need for the E44 work-around,
2634   * and set the console name to "qdf2400_e44".
2635   */
2636  static int __init
2637  qdf2400_e44_early_console_setup(struct earlycon_device *device,
2638  				const char *opt)
2639  {
2640  	if (!device->port.membase)
2641  		return -ENODEV;
2642  
2643  	device->con->write = qdf2400_e44_early_write;
2644  	return 0;
2645  }
2646  EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
2647  
2648  #else
2649  #define AMBA_CONSOLE	NULL
2650  #endif
2651  
2652  static struct uart_driver amba_reg = {
2653  	.owner			= THIS_MODULE,
2654  	.driver_name		= "ttyAMA",
2655  	.dev_name		= "ttyAMA",
2656  	.major			= SERIAL_AMBA_MAJOR,
2657  	.minor			= SERIAL_AMBA_MINOR,
2658  	.nr			= UART_NR,
2659  	.cons			= AMBA_CONSOLE,
2660  };
2661  
2662  static int pl011_probe_dt_alias(int index, struct device *dev)
2663  {
2664  	struct device_node *np;
2665  	static bool seen_dev_with_alias = false;
2666  	static bool seen_dev_without_alias = false;
2667  	int ret = index;
2668  
2669  	if (!IS_ENABLED(CONFIG_OF))
2670  		return ret;
2671  
2672  	np = dev->of_node;
2673  	if (!np)
2674  		return ret;
2675  
2676  	ret = of_alias_get_id(np, "serial");
2677  	if (ret < 0) {
2678  		seen_dev_without_alias = true;
2679  		ret = index;
2680  	} else {
2681  		seen_dev_with_alias = true;
2682  		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2683  			dev_warn(dev, "requested serial port %d not available.\n", ret);
2684  			ret = index;
2685  		}
2686  	}
2687  
2688  	if (seen_dev_with_alias && seen_dev_without_alias)
2689  		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2690  
2691  	return ret;
2692  }
2693  
2694  /* also unregisters the driver if no more ports are left */
2695  static void pl011_unregister_port(struct uart_amba_port *uap)
2696  {
2697  	int i;
2698  	bool busy = false;
2699  
2700  	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
2701  		if (amba_ports[i] == uap)
2702  			amba_ports[i] = NULL;
2703  		else if (amba_ports[i])
2704  			busy = true;
2705  	}
2706  	pl011_dma_remove(uap);
2707  	if (!busy)
2708  		uart_unregister_driver(&amba_reg);
2709  }
2710  
2711  static int pl011_find_free_port(void)
2712  {
2713  	int i;
2714  
2715  	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2716  		if (amba_ports[i] == NULL)
2717  			return i;
2718  
2719  	return -EBUSY;
2720  }
2721  
2722  static int pl011_get_rs485_mode(struct uart_amba_port *uap)
2723  {
2724  	struct uart_port *port = &uap->port;
2725  	int ret;
2726  
2727  	ret = uart_get_rs485_mode(port);
2728  	if (ret)
2729  		return ret;
2730  
2731  	return 0;
2732  }
2733  
2734  static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2735  			    struct resource *mmiobase, int index)
2736  {
2737  	void __iomem *base;
2738  	int ret;
2739  
2740  	base = devm_ioremap_resource(dev, mmiobase);
2741  	if (IS_ERR(base))
2742  		return PTR_ERR(base);
2743  
2744  	index = pl011_probe_dt_alias(index, dev);
2745  
2746  	uap->port.dev = dev;
2747  	uap->port.mapbase = mmiobase->start;
2748  	uap->port.membase = base;
2749  	uap->port.fifosize = uap->fifosize;
2750  	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
2751  	uap->port.flags = UPF_BOOT_AUTOCONF;
2752  	uap->port.line = index;
2753  
2754  	ret = pl011_get_rs485_mode(uap);
2755  	if (ret)
2756  		return ret;
2757  
2758  	amba_ports[index] = uap;
2759  
2760  	return 0;
2761  }
2762  
2763  static int pl011_register_port(struct uart_amba_port *uap)
2764  {
2765  	int ret, i;
2766  
2767  	/* Ensure interrupts from this UART are masked and cleared */
2768  	pl011_write(0, uap, REG_IMSC);
2769  	pl011_write(0xffff, uap, REG_ICR);
2770  
2771  	if (!amba_reg.state) {
2772  		ret = uart_register_driver(&amba_reg);
2773  		if (ret < 0) {
2774  			dev_err(uap->port.dev,
2775  				"Failed to register AMBA-PL011 driver\n");
2776  			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2777  				if (amba_ports[i] == uap)
2778  					amba_ports[i] = NULL;
2779  			return ret;
2780  		}
2781  	}
2782  
2783  	ret = uart_add_one_port(&amba_reg, &uap->port);
2784  	if (ret)
2785  		pl011_unregister_port(uap);
2786  
2787  	return ret;
2788  }
2789  
2790  static const struct serial_rs485 pl011_rs485_supported = {
2791  	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
2792  		 SER_RS485_RX_DURING_TX,
2793  	.delay_rts_before_send = 1,
2794  	.delay_rts_after_send = 1,
2795  };
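/*
 * Userspace sketch (added for illustration, not part of the driver):
 * enabling RS485 mode on a port with the standard TIOCSRS485 ioctl,
 * staying within the capabilities advertised above.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/serial.h>
 *
 *	int enable_rs485(const char *dev)
 *	{
 *		struct serial_rs485 rs485 = { 0 };
 *		int fd = open(dev, O_RDWR | O_NOCTTY);
 *
 *		if (fd < 0)
 *			return -1;
 *		rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
 *		rs485.delay_rts_before_send = 1;
 *		rs485.delay_rts_after_send = 1;
 *		return ioctl(fd, TIOCSRS485, &rs485);
 *	}
 */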
2796  
2797  static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2798  {
2799  	struct uart_amba_port *uap;
2800  	struct vendor_data *vendor = id->data;
2801  	int portnr, ret;
2802  	u32 val;
2803  
2804  	portnr = pl011_find_free_port();
2805  	if (portnr < 0)
2806  		return portnr;
2807  
2808  	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2809  			   GFP_KERNEL);
2810  	if (!uap)
2811  		return -ENOMEM;
2812  
2813  	uap->clk = devm_clk_get(&dev->dev, NULL);
2814  	if (IS_ERR(uap->clk))
2815  		return PTR_ERR(uap->clk);
2816  
2817  	uap->reg_offset = vendor->reg_offset;
2818  	uap->vendor = vendor;
2819  	uap->fifosize = vendor->get_fifosize(dev);
2820  	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
2821  	uap->port.irq = dev->irq[0];
2822  	uap->port.ops = &amba_pl011_pops;
2823  	uap->port.rs485_config = pl011_rs485_config;
2824  	uap->port.rs485_supported = pl011_rs485_supported;
2825  	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2826  
2827  	if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {
2828  		switch (val) {
2829  		case 1:
2830  			uap->port.iotype = UPIO_MEM;
2831  			break;
2832  		case 4:
2833  			uap->port.iotype = UPIO_MEM32;
2834  			break;
2835  		default:
2836  			dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
2837  				 val);
2838  			return -EINVAL;
2839  		}
2840  	}
2841  
2842  	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
2843  	if (ret)
2844  		return ret;
2845  
2846  	amba_set_drvdata(dev, uap);
2847  
2848  	return pl011_register_port(uap);
2849  }
2850  
2851  static void pl011_remove(struct amba_device *dev)
2852  {
2853  	struct uart_amba_port *uap = amba_get_drvdata(dev);
2854  
2855  	uart_remove_one_port(&amba_reg, &uap->port);
2856  	pl011_unregister_port(uap);
2857  }
2858  
2859  #ifdef CONFIG_PM_SLEEP
2860  static int pl011_suspend(struct device *dev)
2861  {
2862  	struct uart_amba_port *uap = dev_get_drvdata(dev);
2863  
2864  	if (!uap)
2865  		return -EINVAL;
2866  
2867  	return uart_suspend_port(&amba_reg, &uap->port);
2868  }
2869  
2870  static int pl011_resume(struct device *dev)
2871  {
2872  	struct uart_amba_port *uap = dev_get_drvdata(dev);
2873  
2874  	if (!uap)
2875  		return -EINVAL;
2876  
2877  	return uart_resume_port(&amba_reg, &uap->port);
2878  }
2879  #endif
2880  
2881  static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2882  
2883  static int sbsa_uart_probe(struct platform_device *pdev)
2884  {
2885  	struct uart_amba_port *uap;
2886  	struct resource *r;
2887  	int portnr, ret;
2888  	int baudrate;
2889  
2890  	/*
2891  	 * Check the mandatory baud rate parameter in the DT node early
2892  	 * so that we can easily exit with an error.
2893  	 */
2894  	if (pdev->dev.of_node) {
2895  		struct device_node *np = pdev->dev.of_node;
2896  
2897  		ret = of_property_read_u32(np, "current-speed", &baudrate);
2898  		if (ret)
2899  			return ret;
2900  	} else {
2901  		baudrate = 115200;
2902  	}
2903  
2904  	portnr = pl011_find_free_port();
2905  	if (portnr < 0)
2906  		return portnr;
2907  
2908  	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
2909  			   GFP_KERNEL);
2910  	if (!uap)
2911  		return -ENOMEM;
2912  
2913  	ret = platform_get_irq(pdev, 0);
2914  	if (ret < 0)
2915  		return ret;
2916  	uap->port.irq	= ret;
2917  
2918  #ifdef CONFIG_ACPI_SPCR_TABLE
2919  	if (qdf2400_e44_present) {
2920  		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
2921  		uap->vendor = &vendor_qdt_qdf2400_e44;
2922  	} else
2923  #endif
2924  		uap->vendor = &vendor_sbsa;
2925  
2926  	uap->reg_offset	= uap->vendor->reg_offset;
2927  	uap->fifosize	= 32;
2928  	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
2929  	uap->port.ops	= &sbsa_uart_pops;
2930  	uap->fixed_baud = baudrate;
2931  
2932  	snprintf(uap->type, sizeof(uap->type), "SBSA");
2933  
2934  	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2935  
2936  	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
2937  	if (ret)
2938  		return ret;
2939  
2940  	platform_set_drvdata(pdev, uap);
2941  
2942  	return pl011_register_port(uap);
2943  }
2944  
2945  static int sbsa_uart_remove(struct platform_device *pdev)
2946  {
2947  	struct uart_amba_port *uap = platform_get_drvdata(pdev);
2948  
2949  	uart_remove_one_port(&amba_reg, &uap->port);
2950  	pl011_unregister_port(uap);
2951  	return 0;
2952  }
2953  
2954  static const struct of_device_id sbsa_uart_of_match[] = {
2955  	{ .compatible = "arm,sbsa-uart", },
2956  	{},
2957  };
2958  MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
2959  
2960  static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
2961  	{ "ARMH0011", 0 },
2962  	{ "ARMHB000", 0 },
2963  	{},
2964  };
2965  MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
2966  
2967  static struct platform_driver arm_sbsa_uart_platform_driver = {
2968  	.probe		= sbsa_uart_probe,
2969  	.remove		= sbsa_uart_remove,
2970  	.driver	= {
2971  		.name	= "sbsa-uart",
2972  		.pm	= &pl011_dev_pm_ops,
2973  		.of_match_table = of_match_ptr(sbsa_uart_of_match),
2974  		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
2975  		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
2976  	},
2977  };
2978  
2979  static const struct amba_id pl011_ids[] = {
2980  	{
2981  		.id	= 0x00041011,
2982  		.mask	= 0x000fffff,
2983  		.data	= &vendor_arm,
2984  	},
2985  	{
2986  		.id	= 0x00380802,
2987  		.mask	= 0x00ffffff,
2988  		.data	= &vendor_st,
2989  	},
2990  	{ 0, 0 },
2991  };
2992  
2993  MODULE_DEVICE_TABLE(amba, pl011_ids);
2994  
2995  static struct amba_driver pl011_driver = {
2996  	.drv = {
2997  		.name	= "uart-pl011",
2998  		.pm	= &pl011_dev_pm_ops,
2999  		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
3000  	},
3001  	.id_table	= pl011_ids,
3002  	.probe		= pl011_probe,
3003  	.remove		= pl011_remove,
3004  };
3005  
3006  static int __init pl011_init(void)
3007  {
3008  	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
3009  
3010  	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
3011  		pr_warn("could not register SBSA UART platform driver\n");
3012  	return amba_driver_register(&pl011_driver);
3013  }
3014  
3015  static void __exit pl011_exit(void)
3016  {
3017  	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
3018  	amba_driver_unregister(&pl011_driver);
3019  }
3020  
3021  /*
3022   * While this can be a module, if built in it's most likely the console,
3023   * so let's leave module_exit but move module_init to an earlier place.
3024   */
3025  arch_initcall(pl011_init);
3026  module_exit(pl011_exit);
3027  
3028  MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
3029  MODULE_DESCRIPTION("ARM AMBA serial port driver");
3030  MODULE_LICENSE("GPL");
3031