xref: /openbmc/linux/drivers/tty/serial/amba-pl011.c (revision 94c7b6fc)
1 /*
2  *  Driver for AMBA serial ports
3  *
4  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5  *
6  *  Copyright 1999 ARM Limited
7  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
8  *  Copyright (C) 2010 ST-Ericsson SA
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * This is a generic driver for ARM AMBA-type serial ports.  They
25  * have a lot of 16550-like features, but are not register compatible.
26  * Note that although they do have CTS, DCD and DSR inputs, they do
27  * not have an RI input, nor do they have DTR or RTS outputs.  If
28  * required, these have to be supplied via some other means (eg, GPIO)
29  * and hooked into this driver.
30  */
31 
32 
33 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
34 #define SUPPORT_SYSRQ
35 #endif
36 
37 #include <linux/module.h>
38 #include <linux/ioport.h>
39 #include <linux/init.h>
40 #include <linux/console.h>
41 #include <linux/sysrq.h>
42 #include <linux/device.h>
43 #include <linux/tty.h>
44 #include <linux/tty_flip.h>
45 #include <linux/serial_core.h>
46 #include <linux/serial.h>
47 #include <linux/amba/bus.h>
48 #include <linux/amba/serial.h>
49 #include <linux/clk.h>
50 #include <linux/slab.h>
51 #include <linux/dmaengine.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/scatterlist.h>
54 #include <linux/delay.h>
55 #include <linux/types.h>
56 #include <linux/of.h>
57 #include <linux/of_device.h>
58 #include <linux/pinctrl/consumer.h>
59 #include <linux/sizes.h>
60 #include <linux/io.h>
61 
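/*
 * Up to 14 ports may be registered; SERIAL_AMBA_MAJOR/MINOR below give the
 * char device numbers used at registration (204:64 is conventionally ttyAMA0).
 */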
62 #define UART_NR			14
63 
64 #define SERIAL_AMBA_MAJOR	204
65 #define SERIAL_AMBA_MINOR	64
66 #define SERIAL_AMBA_NR		UART_NR
67 
68 #define AMBA_ISR_PASS_LIMIT	256
69 
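/*
 * Error bits reported in the data register, plus a dummy flag ORed into every
 * received character; adding it to ignore_status_mask (when CREAD is clear)
 * causes all received data to be discarded.
 */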
70 #define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
71 #define UART_DUMMY_DR_RX	(1 << 16)
72 
73 /* There is by now at least one vendor with differing details, so handle it */
74 struct vendor_data {
75 	unsigned int		ifls;
76 	unsigned int		lcrh_tx;
77 	unsigned int		lcrh_rx;
78 	bool			oversampling;
79 	bool			dma_threshold;
80 	bool			cts_event_workaround;
81 
82 	unsigned int (*get_fifosize)(struct amba_device *dev);
83 };
84 
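/* The ARM implementation grew its FIFO from 16 to 32 bytes at peripheral revision 3 */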
85 static unsigned int get_fifosize_arm(struct amba_device *dev)
86 {
87 	return amba_rev(dev) < 3 ? 16 : 32;
88 }
89 
90 static struct vendor_data vendor_arm = {
91 	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
92 	.lcrh_tx		= UART011_LCRH,
93 	.lcrh_rx		= UART011_LCRH,
94 	.oversampling		= false,
95 	.dma_threshold		= false,
96 	.cts_event_workaround	= false,
97 	.get_fifosize		= get_fifosize_arm,
98 };
99 
100 static unsigned int get_fifosize_st(struct amba_device *dev)
101 {
102 	return 64;
103 }
104 
105 static struct vendor_data vendor_st = {
106 	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
107 	.lcrh_tx		= ST_UART011_LCRH_TX,
108 	.lcrh_rx		= ST_UART011_LCRH_RX,
109 	.oversampling		= true,
110 	.dma_threshold		= true,
111 	.cts_event_workaround	= true,
112 	.get_fifosize		= get_fifosize_st,
113 };
114 
115 /* Deals with DMA transactions */
116 
117 struct pl011_sgbuf {
118 	struct scatterlist sg;
119 	char *buf;
120 };
121 
122 struct pl011_dmarx_data {
123 	struct dma_chan		*chan;
124 	struct completion	complete;
125 	bool			use_buf_b;
126 	struct pl011_sgbuf	sgbuf_a;
127 	struct pl011_sgbuf	sgbuf_b;
128 	dma_cookie_t		cookie;
129 	bool			running;
130 	struct timer_list	timer;
131 	unsigned int last_residue;
132 	unsigned long last_jiffies;
133 	bool auto_poll_rate;
134 	unsigned int poll_rate;
135 	unsigned int poll_timeout;
136 };
137 
138 struct pl011_dmatx_data {
139 	struct dma_chan		*chan;
140 	struct scatterlist	sg;
141 	char			*buf;
142 	bool			queued;
143 };
144 
145 /*
146  * We wrap our port structure around the generic uart_port.
147  */
148 struct uart_amba_port {
149 	struct uart_port	port;
150 	struct clk		*clk;
151 	const struct vendor_data *vendor;
152 	unsigned int		dmacr;		/* dma control reg */
153 	unsigned int		im;		/* interrupt mask */
154 	unsigned int		old_status;
155 	unsigned int		fifosize;	/* vendor-specific */
156 	unsigned int		lcrh_tx;	/* vendor-specific */
157 	unsigned int		lcrh_rx;	/* vendor-specific */
158 	unsigned int		old_cr;		/* state during shutdown */
159 	bool			autorts;
160 	char			type[12];
161 #ifdef CONFIG_DMA_ENGINE
162 	/* DMA stuff */
163 	bool			using_tx_dma;
164 	bool			using_rx_dma;
165 	struct pl011_dmarx_data dmarx;
166 	struct pl011_dmatx_data	dmatx;
167 #endif
168 };
169 
170 /*
171  * Reads up to 256 characters from the FIFO or until it's empty and
172  * inserts them into the TTY layer. Returns the number of characters
173  * read from the FIFO.
174  */
175 static int pl011_fifo_to_tty(struct uart_amba_port *uap)
176 {
177 	u16 status, ch;
178 	unsigned int flag, max_count = 256;
179 	int fifotaken = 0;
180 
181 	while (max_count--) {
182 		status = readw(uap->port.membase + UART01x_FR);
183 		if (status & UART01x_FR_RXFE)
184 			break;
185 
186 		/* Take chars from the FIFO and update status */
187 		ch = readw(uap->port.membase + UART01x_DR) |
188 			UART_DUMMY_DR_RX;
189 		flag = TTY_NORMAL;
190 		uap->port.icount.rx++;
191 		fifotaken++;
192 
193 		if (unlikely(ch & UART_DR_ERROR)) {
194 			if (ch & UART011_DR_BE) {
195 				ch &= ~(UART011_DR_FE | UART011_DR_PE);
196 				uap->port.icount.brk++;
197 				if (uart_handle_break(&uap->port))
198 					continue;
199 			} else if (ch & UART011_DR_PE)
200 				uap->port.icount.parity++;
201 			else if (ch & UART011_DR_FE)
202 				uap->port.icount.frame++;
203 			if (ch & UART011_DR_OE)
204 				uap->port.icount.overrun++;
205 
206 			ch &= uap->port.read_status_mask;
207 
208 			if (ch & UART011_DR_BE)
209 				flag = TTY_BREAK;
210 			else if (ch & UART011_DR_PE)
211 				flag = TTY_PARITY;
212 			else if (ch & UART011_DR_FE)
213 				flag = TTY_FRAME;
214 		}
215 
216 		if (uart_handle_sysrq_char(&uap->port, ch & 255))
217 			continue;
218 
219 		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
220 	}
221 
222 	return fifotaken;
223 }
224 
225 
226 /*
227  * All the DMA operation mode stuff goes inside this ifdef.
228  * This assumes that you have a generic DMA device interface,
229  * no custom DMA interfaces are supported.
230  */
231 #ifdef CONFIG_DMA_ENGINE
232 
233 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
234 
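/*
 * Allocate a DMA-coherent bounce buffer for one direction and describe it
 * with a single-entry scatterlist.
 */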
235 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
236 	enum dma_data_direction dir)
237 {
238 	dma_addr_t dma_addr;
239 
240 	sg->buf = dma_alloc_coherent(chan->device->dev,
241 		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
242 	if (!sg->buf)
243 		return -ENOMEM;
244 
245 	sg_init_table(&sg->sg, 1);
246 	sg_set_page(&sg->sg, phys_to_page(dma_addr),
247 		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
248 	sg_dma_address(&sg->sg) = dma_addr;
249 
250 	return 0;
251 }
252 
253 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
254 	enum dma_data_direction dir)
255 {
256 	if (sg->buf) {
257 		dma_free_coherent(chan->device->dev,
258 			PL011_DMA_BUFFER_SIZE, sg->buf,
259 			sg_dma_address(&sg->sg));
260 	}
261 }
262 
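/*
 * Acquire the TX (and optionally RX) DMA channels, preferring channels named
 * "tx"/"rx" from the firmware description and falling back to the platform
 * data filter function, then apply a slave configuration pointing at the UART
 * data register with burst sizes derived from the FIFO depth.
 */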
263 static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
264 {
265 	/* DMA is the sole user of the platform data right now */
266 	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
267 	struct dma_slave_config tx_conf = {
268 		.dst_addr = uap->port.mapbase + UART01x_DR,
269 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
270 		.direction = DMA_MEM_TO_DEV,
271 		.dst_maxburst = uap->fifosize >> 1,
272 		.device_fc = false,
273 	};
274 	struct dma_chan *chan;
275 	dma_cap_mask_t mask;
276 
277 	chan = dma_request_slave_channel(dev, "tx");
278 
279 	if (!chan) {
280 		/* We need platform data */
281 		if (!plat || !plat->dma_filter) {
282 			dev_info(uap->port.dev, "no DMA platform data\n");
283 			return;
284 		}
285 
286 		/* Try to acquire a generic DMA engine slave TX channel */
287 		dma_cap_zero(mask);
288 		dma_cap_set(DMA_SLAVE, mask);
289 
290 		chan = dma_request_channel(mask, plat->dma_filter,
291 						plat->dma_tx_param);
292 		if (!chan) {
293 			dev_err(uap->port.dev, "no TX DMA channel!\n");
294 			return;
295 		}
296 	}
297 
298 	dmaengine_slave_config(chan, &tx_conf);
299 	uap->dmatx.chan = chan;
300 
301 	dev_info(uap->port.dev, "DMA channel TX %s\n",
302 		 dma_chan_name(uap->dmatx.chan));
303 
304 	/* Optionally make use of an RX channel as well */
305 	chan = dma_request_slave_channel(dev, "rx");
306 
307 	if (!chan && plat && plat->dma_rx_param) {
308 		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
309 
310 		if (!chan) {
311 			dev_err(uap->port.dev, "no RX DMA channel!\n");
312 			return;
313 		}
314 	}
315 
316 	if (chan) {
317 		struct dma_slave_config rx_conf = {
318 			.src_addr = uap->port.mapbase + UART01x_DR,
319 			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
320 			.direction = DMA_DEV_TO_MEM,
321 			.src_maxburst = uap->fifosize >> 2,
322 			.device_fc = false,
323 		};
324 
325 		dmaengine_slave_config(chan, &rx_conf);
326 		uap->dmarx.chan = chan;
327 
328 		if (plat && plat->dma_rx_poll_enable) {
329 			/* Set poll rate if specified. */
330 			if (plat->dma_rx_poll_rate) {
331 				uap->dmarx.auto_poll_rate = false;
332 				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
333 			} else {
334 				/*
335 				 * Default the poll rate to 100 ms if not
336 				 * specified; it will be adjusted to match
337 				 * the baud rate in set_termios().
338 				 */
339 				uap->dmarx.auto_poll_rate = true;
340 				uap->dmarx.poll_rate =  100;
341 			}
342 			/* Default poll_timeout to 3 seconds if not specified. */
343 			if (plat->dma_rx_poll_timeout)
344 				uap->dmarx.poll_timeout =
345 					plat->dma_rx_poll_timeout;
346 			else
347 				uap->dmarx.poll_timeout = 3000;
348 		} else
349 			uap->dmarx.auto_poll_rate = false;
350 
351 		dev_info(uap->port.dev, "DMA channel RX %s\n",
352 			 dma_chan_name(uap->dmarx.chan));
353 	}
354 }
355 
356 #ifndef MODULE
357 /*
358  * Stack up the UARTs and defer the work above to device initcall time,
359  * because the serial driver is registered as an arch initcall, at which
360  * point the DMA subsystem is not yet available.  At device initcall time
361  * the driver will switch over to using DMA where desired.
362  */
363 struct dma_uap {
364 	struct list_head node;
365 	struct uart_amba_port *uap;
366 	struct device *dev;
367 };
368 
369 static LIST_HEAD(pl011_dma_uarts);
370 
371 static int __init pl011_dma_initcall(void)
372 {
373 	struct list_head *node, *tmp;
374 
375 	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
376 		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
377 		pl011_dma_probe_initcall(dmau->dev, dmau->uap);
378 		list_del(node);
379 		kfree(dmau);
380 	}
381 	return 0;
382 }
383 
384 device_initcall(pl011_dma_initcall);
385 
386 static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
387 {
388 	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
389 	if (dmau) {
390 		dmau->uap = uap;
391 		dmau->dev = dev;
392 		list_add_tail(&dmau->node, &pl011_dma_uarts);
393 	}
394 }
395 #else
396 static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
397 {
398 	pl011_dma_probe_initcall(dev, uap);
399 }
400 #endif
401 
402 static void pl011_dma_remove(struct uart_amba_port *uap)
403 {
404 	/* TODO: remove the initcall if it has not yet executed */
405 	if (uap->dmatx.chan)
406 		dma_release_channel(uap->dmatx.chan);
407 	if (uap->dmarx.chan)
408 		dma_release_channel(uap->dmarx.chan);
409 }
410 
411 /* Forward declare this for the refill routine */
412 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
413 
414 /*
415  * The current DMA TX buffer has been sent.
416  * Try to queue up another DMA buffer.
417  */
418 static void pl011_dma_tx_callback(void *data)
419 {
420 	struct uart_amba_port *uap = data;
421 	struct pl011_dmatx_data *dmatx = &uap->dmatx;
422 	unsigned long flags;
423 	u16 dmacr;
424 
425 	spin_lock_irqsave(&uap->port.lock, flags);
426 	if (uap->dmatx.queued)
427 		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
428 			     DMA_TO_DEVICE);
429 
430 	dmacr = uap->dmacr;
431 	uap->dmacr = dmacr & ~UART011_TXDMAE;
432 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
433 
434 	/*
435 	 * If TX DMA was disabled, it means that we've stopped the DMA for
436 	 * some reason (eg, XOFF received, or we want to send an X-char.)
437 	 *
438 	 * Note: we need to be careful here of a potential race between DMA
439 	 * and the rest of the driver - if the driver disables TX DMA while
440 	 * a TX buffer is completing, we must update the tx queued status to
441 	 * get further refills (hence we check dmacr).
442 	 */
443 	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
444 	    uart_circ_empty(&uap->port.state->xmit)) {
445 		uap->dmatx.queued = false;
446 		spin_unlock_irqrestore(&uap->port.lock, flags);
447 		return;
448 	}
449 
450 	if (pl011_dma_tx_refill(uap) <= 0) {
451 		/*
452 		 * We didn't queue a DMA buffer for some reason, but we
453 		 * have data pending to be sent.  Re-enable the TX IRQ.
454 		 */
455 		uap->im |= UART011_TXIM;
456 		writew(uap->im, uap->port.membase + UART011_IMSC);
457 	}
458 	spin_unlock_irqrestore(&uap->port.lock, flags);
459 }
460 
461 /*
462  * Try to refill the TX DMA buffer.
463  * Locking: called with port lock held and IRQs disabled.
464  * Returns:
465  *   1 if we queued up a TX DMA buffer.
466  *   0 if we didn't want to handle this by DMA
467  *  <0 on error
468  */
469 static int pl011_dma_tx_refill(struct uart_amba_port *uap)
470 {
471 	struct pl011_dmatx_data *dmatx = &uap->dmatx;
472 	struct dma_chan *chan = dmatx->chan;
473 	struct dma_device *dma_dev = chan->device;
474 	struct dma_async_tx_descriptor *desc;
475 	struct circ_buf *xmit = &uap->port.state->xmit;
476 	unsigned int count;
477 
478 	/*
479 	 * Try to avoid the overhead involved in using DMA if the
480 	 * transaction fits in the first half of the FIFO, by using
481 	 * the standard interrupt handling.  This ensures that we
482 	 * issue a uart_write_wakeup() at the appropriate time.
483 	 */
484 	count = uart_circ_chars_pending(xmit);
485 	if (count < (uap->fifosize >> 1)) {
486 		uap->dmatx.queued = false;
487 		return 0;
488 	}
489 
490 	/*
491 	 * Bodge: don't send the last character by DMA, as this
492 	 * will prevent XON from notifying us to restart DMA.
493 	 */
494 	count -= 1;
495 
496 	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
497 	if (count > PL011_DMA_BUFFER_SIZE)
498 		count = PL011_DMA_BUFFER_SIZE;
499 
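	/*
	 * Copy the characters into the linear DMA bounce buffer.  A circular
	 * buffer that has wrapped needs two copies: tail..end, then start..head.
	 */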
500 	if (xmit->tail < xmit->head)
501 		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
502 	else {
503 		size_t first = UART_XMIT_SIZE - xmit->tail;
504 		size_t second = xmit->head;
505 
506 		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
507 		if (second)
508 			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
509 	}
510 
511 	dmatx->sg.length = count;
512 
513 	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
514 		uap->dmatx.queued = false;
515 		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
516 		return -EBUSY;
517 	}
518 
519 	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
520 					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
521 	if (!desc) {
522 		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
523 		uap->dmatx.queued = false;
524 		/*
525 		 * If DMA cannot be used right now, we complete this
526 		 * transaction via IRQ and let the TTY layer retry.
527 		 */
528 		dev_dbg(uap->port.dev, "TX DMA busy\n");
529 		return -EBUSY;
530 	}
531 
532 	/* Some data to go along to the callback */
533 	desc->callback = pl011_dma_tx_callback;
534 	desc->callback_param = uap;
535 
536 	/* All errors should happen at prepare time */
537 	dmaengine_submit(desc);
538 
539 	/* Fire the DMA transaction */
540 	dma_dev->device_issue_pending(chan);
541 
542 	uap->dmacr |= UART011_TXDMAE;
543 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
544 	uap->dmatx.queued = true;
545 
546 	/*
547 	 * Now we know that DMA will fire, so advance the ring buffer
548 	 * with the stuff we just dispatched.
549 	 */
550 	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
551 	uap->port.icount.tx += count;
552 
553 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
554 		uart_write_wakeup(&uap->port);
555 
556 	return 1;
557 }
558 
559 /*
560  * We received a transmit interrupt without a pending X-char but with
561  * pending characters.
562  * Locking: called with port lock held and IRQs disabled.
563  * Returns:
564  *   false if we want to use PIO to transmit
565  *   true if we queued a DMA buffer
566  */
567 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
568 {
569 	if (!uap->using_tx_dma)
570 		return false;
571 
572 	/*
573 	 * If we already have a TX buffer queued, but received a
574 	 * TX interrupt, it will be because we've just sent an X-char.
575 	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
576 	 */
577 	if (uap->dmatx.queued) {
578 		uap->dmacr |= UART011_TXDMAE;
579 		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
580 		uap->im &= ~UART011_TXIM;
581 		writew(uap->im, uap->port.membase + UART011_IMSC);
582 		return true;
583 	}
584 
585 	/*
586 	 * We don't have a TX buffer queued, so try to queue one.
587 	 * If we successfully queued a buffer, mask the TX IRQ.
588 	 */
589 	if (pl011_dma_tx_refill(uap) > 0) {
590 		uap->im &= ~UART011_TXIM;
591 		writew(uap->im, uap->port.membase + UART011_IMSC);
592 		return true;
593 	}
594 	return false;
595 }
596 
597 /*
598  * Stop the DMA transmit (eg, due to received XOFF).
599  * Locking: called with port lock held and IRQs disabled.
600  */
601 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
602 {
603 	if (uap->dmatx.queued) {
604 		uap->dmacr &= ~UART011_TXDMAE;
605 		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
606 	}
607 }
608 
609 /*
610  * Try to start a DMA transmit, or in the case of an XON/OFF
611  * character queued for send, try to get that character out ASAP.
612  * Locking: called with port lock held and IRQs disabled.
613  * Returns:
614  *   false if we want the TX IRQ to be enabled
615  *   true if we have a buffer queued
616  */
617 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
618 {
619 	u16 dmacr;
620 
621 	if (!uap->using_tx_dma)
622 		return false;
623 
624 	if (!uap->port.x_char) {
625 		/* no X-char, try to push chars out in DMA mode */
626 		bool ret = true;
627 
628 		if (!uap->dmatx.queued) {
629 			if (pl011_dma_tx_refill(uap) > 0) {
630 				uap->im &= ~UART011_TXIM;
631 				ret = true;
632 			} else {
633 				uap->im |= UART011_TXIM;
634 				ret = false;
635 			}
636 			writew(uap->im, uap->port.membase + UART011_IMSC);
637 		} else if (!(uap->dmacr & UART011_TXDMAE)) {
638 			uap->dmacr |= UART011_TXDMAE;
639 			writew(uap->dmacr,
640 				       uap->port.membase + UART011_DMACR);
641 		}
642 		return ret;
643 	}
644 
645 	/*
646 	 * We have an X-char to send.  Disable DMA to prevent it from loading
647 	 * the TX FIFO, and then see if we can stuff the X-char into the FIFO.
648 	 */
649 	dmacr = uap->dmacr;
650 	uap->dmacr &= ~UART011_TXDMAE;
651 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
652 
653 	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
654 		/*
655 		 * No space in the FIFO, so enable the transmit interrupt
656 		 * so we know when there is space.  Note that once we've
657 		 * loaded the character, we should just re-enable DMA.
658 		 */
659 		return false;
660 	}
661 
662 	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
663 	uap->port.icount.tx++;
664 	uap->port.x_char = 0;
665 
666 	/* Success - restore the DMA state */
667 	uap->dmacr = dmacr;
668 	writew(dmacr, uap->port.membase + UART011_DMACR);
669 
670 	return true;
671 }
672 
673 /*
674  * Flush the transmit buffer.
675  * Locking: called with port lock held and IRQs disabled.
676  */
677 static void pl011_dma_flush_buffer(struct uart_port *port)
678 __releases(&uap->port.lock)
679 __acquires(&uap->port.lock)
680 {
681 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
682 
683 	if (!uap->using_tx_dma)
684 		return;
685 
686 	/* Avoid deadlock with the DMA engine callback */
687 	spin_unlock(&uap->port.lock);
688 	dmaengine_terminate_all(uap->dmatx.chan);
689 	spin_lock(&uap->port.lock);
690 	if (uap->dmatx.queued) {
691 		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
692 			     DMA_TO_DEVICE);
693 		uap->dmatx.queued = false;
694 		uap->dmacr &= ~UART011_TXDMAE;
695 		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
696 	}
697 }
698 
699 static void pl011_dma_rx_callback(void *data);
700 
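/*
 * Queue a DMA transfer into the currently selected RX bounce buffer and
 * switch the port over from RX interrupts to RX DMA.  Returns 0 on success,
 * or a negative error so the caller can stay in (or fall back to)
 * interrupt mode.
 */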
701 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
702 {
703 	struct dma_chan *rxchan = uap->dmarx.chan;
704 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
705 	struct dma_async_tx_descriptor *desc;
706 	struct pl011_sgbuf *sgbuf;
707 
708 	if (!rxchan)
709 		return -EIO;
710 
711 	/* Start the RX DMA job */
712 	sgbuf = uap->dmarx.use_buf_b ?
713 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
714 	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
715 					DMA_DEV_TO_MEM,
716 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
717 	/*
718 	 * If the DMA engine is busy and cannot prepare a
719 	 * channel, no big deal, the driver will fall back
720 	 * to interrupt mode as a result of this error code.
721 	 */
722 	if (!desc) {
723 		uap->dmarx.running = false;
724 		dmaengine_terminate_all(rxchan);
725 		return -EBUSY;
726 	}
727 
728 	/* Some data to go along to the callback */
729 	desc->callback = pl011_dma_rx_callback;
730 	desc->callback_param = uap;
731 	dmarx->cookie = dmaengine_submit(desc);
732 	dma_async_issue_pending(rxchan);
733 
734 	uap->dmacr |= UART011_RXDMAE;
735 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
736 	uap->dmarx.running = true;
737 
738 	uap->im &= ~UART011_RXIM;
739 	writew(uap->im, uap->port.membase + UART011_IMSC);
740 
741 	return 0;
742 }
743 
744 /*
745  * This is called when either the DMA job completes or the FIFO
746  * timeout interrupt occurs. It must be called with the port
747  * spinlock uap->port.lock held.
748  */
749 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
750 			       u32 pending, bool use_buf_b,
751 			       bool readfifo)
752 {
753 	struct tty_port *port = &uap->port.state->port;
754 	struct pl011_sgbuf *sgbuf = use_buf_b ?
755 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
756 	int dma_count = 0;
757 	u32 fifotaken = 0; /* only used for vdbg() */
758 
759 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
760 	int dmataken = 0;
761 
762 	if (uap->dmarx.poll_rate) {
763 		/* The data can be taken by polling */
764 		dmataken = sgbuf->sg.length - dmarx->last_residue;
765 		/* Recalculate the pending size */
766 		if (pending >= dmataken)
767 			pending -= dmataken;
768 	}
769 
770 	/* Pick up the remaining data from the DMA buffer */
771 	if (pending) {
772 
773 		/*
774 		 * First take all chars in the DMA pipe, then look in the FIFO.
775 		 * Note that tty_insert_flip_string() tries to take as many chars
776 		 * as it can.
777 		 */
778 		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
779 				pending);
780 
781 		uap->port.icount.rx += dma_count;
782 		if (dma_count < pending)
783 			dev_warn(uap->port.dev,
784 				 "couldn't insert all characters (TTY is full?)\n");
785 	}
786 
787 	/* Reset the last_residue for Rx DMA poll */
788 	if (uap->dmarx.poll_rate)
789 		dmarx->last_residue = sgbuf->sg.length;
790 
791 	/*
792 	 * Only continue with trying to read the FIFO if all DMA chars have
793 	 * been taken first.
794 	 */
795 	if (dma_count == pending && readfifo) {
796 		/* Clear any error flags */
797 		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
798 		       uap->port.membase + UART011_ICR);
799 
800 		/*
801 		 * If we read all the DMA'd characters, and we had an
802 		 * incomplete buffer, that could be due to an rx error, or
803 		 * maybe we just timed out. Read any pending chars and check
804 		 * the error status.
805 		 *
806 		 * Error conditions will only occur in the FIFO, these will
807 		 * trigger an immediate interrupt and stop the DMA job, so we
808 		 * will always find the error in the FIFO, never in the DMA
809 		 * buffer.
810 		 */
811 		fifotaken = pl011_fifo_to_tty(uap);
812 	}
813 
814 	spin_unlock(&uap->port.lock);
815 	dev_vdbg(uap->port.dev,
816 		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
817 		 dma_count, fifotaken);
818 	tty_flip_buffer_push(port);
819 	spin_lock(&uap->port.lock);
820 }
821 
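/*
 * Called from the interrupt handler on an RX or receive-timeout interrupt
 * while a DMA job is in flight: pause the channel to get a trustworthy
 * residue, push the data received so far, then swap bounce buffers and
 * restart DMA.
 */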
822 static void pl011_dma_rx_irq(struct uart_amba_port *uap)
823 {
824 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
825 	struct dma_chan *rxchan = dmarx->chan;
826 	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
827 		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
828 	size_t pending;
829 	struct dma_tx_state state;
830 	enum dma_status dmastat;
831 
832 	/*
833 	 * Pause the transfer so we can trust the current counter;
834 	 * do this before we pause the PL011 block, else we may
835 	 * overflow the FIFO.
836 	 */
837 	if (dmaengine_pause(rxchan))
838 		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
839 	dmastat = rxchan->device->device_tx_status(rxchan,
840 						   dmarx->cookie, &state);
841 	if (dmastat != DMA_PAUSED)
842 		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
843 
844 	/* Disable RX DMA - incoming data will wait in the FIFO */
845 	uap->dmacr &= ~UART011_RXDMAE;
846 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
847 	uap->dmarx.running = false;
848 
849 	pending = sgbuf->sg.length - state.residue;
850 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
851 	/* Then we terminate the transfer - we now know our residue */
852 	dmaengine_terminate_all(rxchan);
853 
854 	/*
855 	 * This will take the chars we have so far and insert
856 	 * into the framework.
857 	 */
858 	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
859 
860 	/* Switch buffer & re-trigger DMA job */
861 	dmarx->use_buf_b = !dmarx->use_buf_b;
862 	if (pl011_dma_rx_trigger_dma(uap)) {
863 		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
864 			"fall back to interrupt mode\n");
865 		uap->im |= UART011_RXIM;
866 		writew(uap->im, uap->port.membase + UART011_IMSC);
867 	}
868 }
869 
870 static void pl011_dma_rx_callback(void *data)
871 {
872 	struct uart_amba_port *uap = data;
873 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
874 	struct dma_chan *rxchan = dmarx->chan;
875 	bool lastbuf = dmarx->use_buf_b;
876 	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
877 		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
878 	size_t pending;
879 	struct dma_tx_state state;
880 	int ret;
881 
882 	/*
883 	 * This completion interrupt occurs typically when the
884 	 * RX buffer is totally stuffed but no timeout has yet
885 	 * occurred. When that happens, we just want the RX
886 	 * routine to flush out the secondary DMA buffer while
887 	 * we immediately trigger the next DMA job.
888 	 */
889 	spin_lock_irq(&uap->port.lock);
890 	/*
891 	 * Rx data can be taken by the UART interrupts during
892 	 * the DMA irq handler. So we check the residue here.
893 	 */
894 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
895 	pending = sgbuf->sg.length - state.residue;
896 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
897 	/* Then we terminate the transfer - we now know our residue */
898 	dmaengine_terminate_all(rxchan);
899 
900 	uap->dmarx.running = false;
901 	dmarx->use_buf_b = !lastbuf;
902 	ret = pl011_dma_rx_trigger_dma(uap);
903 
904 	pl011_dma_rx_chars(uap, pending, lastbuf, false);
905 	spin_unlock_irq(&uap->port.lock);
906 	/*
907 	 * Do this check after we picked the DMA chars so we don't
908 	 * get some IRQ immediately from RX.
909 	 */
910 	if (ret) {
911 		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
912 			"fall back to interrupt mode\n");
913 		uap->im |= UART011_RXIM;
914 		writew(uap->im, uap->port.membase + UART011_IMSC);
915 	}
916 }
917 
918 /*
919  * Stop accepting received characters, when we're shutting down or
920  * suspending this port.
921  * Locking: called with port lock held and IRQs disabled.
922  */
923 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
924 {
925 	/* FIXME.  Just disable the DMA enable */
926 	uap->dmacr &= ~UART011_RXDMAE;
927 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
928 }
929 
930 /*
931  * Timer handler for Rx DMA polling.
932  * On each poll it checks the residue in the DMA buffer and transfers
933  * data to the TTY. last_residue is then updated for the next poll.
934  */
935 static void pl011_dma_rx_poll(unsigned long args)
936 {
937 	struct uart_amba_port *uap = (struct uart_amba_port *)args;
938 	struct tty_port *port = &uap->port.state->port;
939 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
940 	struct dma_chan *rxchan = uap->dmarx.chan;
941 	unsigned long flags = 0;
942 	unsigned int dmataken = 0;
943 	unsigned int size = 0;
944 	struct pl011_sgbuf *sgbuf;
945 	int dma_count;
946 	struct dma_tx_state state;
947 
948 	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
949 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
950 	if (likely(state.residue < dmarx->last_residue)) {
951 		dmataken = sgbuf->sg.length - dmarx->last_residue;
952 		size = dmarx->last_residue - state.residue;
953 		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
954 				size);
955 		if (dma_count == size)
956 			dmarx->last_residue =  state.residue;
957 		dmarx->last_jiffies = jiffies;
958 	}
959 	tty_flip_buffer_push(port);
960 
961 	/*
962 	 * If no data is received in poll_timeout, the driver will fall back
963 	 * to interrupt mode. We will retrigger DMA at the first interrupt.
964 	 */
965 	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
966 			> uap->dmarx.poll_timeout) {
967 
968 		spin_lock_irqsave(&uap->port.lock, flags);
969 		pl011_dma_rx_stop(uap);
970 		uap->im |= UART011_RXIM;
971 		writew(uap->im, uap->port.membase + UART011_IMSC);
972 		spin_unlock_irqrestore(&uap->port.lock, flags);
973 
974 		uap->dmarx.running = false;
975 		dmaengine_terminate_all(rxchan);
976 		del_timer(&uap->dmarx.timer);
977 	} else {
978 		mod_timer(&uap->dmarx.timer,
979 			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
980 	}
981 }
982 
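/*
 * Called from pl011_startup(): allocate the TX bounce buffer, set up both RX
 * bounce buffers, enable DMA-on-error and kick off the first RX DMA job
 * (plus the polling timer when poll mode is used).
 */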
983 static void pl011_dma_startup(struct uart_amba_port *uap)
984 {
985 	int ret;
986 
987 	if (!uap->dmatx.chan)
988 		return;
989 
990 	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
991 	if (!uap->dmatx.buf) {
992 		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
993 		uap->port.fifosize = uap->fifosize;
994 		return;
995 	}
996 
997 	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
998 
999 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
1000 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
1001 	uap->using_tx_dma = true;
1002 
1003 	if (!uap->dmarx.chan)
1004 		goto skip_rx;
1005 
1006 	/* Allocate and map DMA RX buffers */
1007 	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1008 			       DMA_FROM_DEVICE);
1009 	if (ret) {
1010 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1011 			"RX buffer A", ret);
1012 		goto skip_rx;
1013 	}
1014 
1015 	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
1016 			       DMA_FROM_DEVICE);
1017 	if (ret) {
1018 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1019 			"RX buffer B", ret);
1020 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1021 				 DMA_FROM_DEVICE);
1022 		goto skip_rx;
1023 	}
1024 
1025 	uap->using_rx_dma = true;
1026 
1027 skip_rx:
1028 	/* Turn on DMA error (RX/TX will be enabled on demand) */
1029 	uap->dmacr |= UART011_DMAONERR;
1030 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
1031 
1032 	/*
1033 	 * The ST Micro variants have a specific DMA burst threshold
1034 	 * compensation. Set this to 16 bytes, so bursts will only
1035 	 * be issued above/below 16 bytes.
1036 	 */
1037 	if (uap->vendor->dma_threshold)
1038 		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
1039 			       uap->port.membase + ST_UART011_DMAWM);
1040 
1041 	if (uap->using_rx_dma) {
1042 		if (pl011_dma_rx_trigger_dma(uap))
1043 			dev_dbg(uap->port.dev, "could not trigger initial "
1044 				"RX DMA job, fall back to interrupt mode\n");
1045 		if (uap->dmarx.poll_rate) {
1046 			init_timer(&(uap->dmarx.timer));
1047 			uap->dmarx.timer.function = pl011_dma_rx_poll;
1048 			uap->dmarx.timer.data = (unsigned long)uap;
1049 			mod_timer(&uap->dmarx.timer,
1050 				jiffies +
1051 				msecs_to_jiffies(uap->dmarx.poll_rate));
1052 			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1053 			uap->dmarx.last_jiffies = jiffies;
1054 		}
1055 	}
1056 }
1057 
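/*
 * Undo pl011_dma_startup(): wait for the UART to go idle, clear the DMA
 * control bits, terminate any in-flight transfers and free the TX and RX
 * bounce buffers.
 */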
1058 static void pl011_dma_shutdown(struct uart_amba_port *uap)
1059 {
1060 	if (!(uap->using_tx_dma || uap->using_rx_dma))
1061 		return;
1062 
1063 	/* Disable RX and TX DMA */
1064 	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
1065 		barrier();
1066 
1067 	spin_lock_irq(&uap->port.lock);
1068 	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1069 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
1070 	spin_unlock_irq(&uap->port.lock);
1071 
1072 	if (uap->using_tx_dma) {
1073 		/* In theory, this should already be done by pl011_dma_flush_buffer */
1074 		dmaengine_terminate_all(uap->dmatx.chan);
1075 		if (uap->dmatx.queued) {
1076 			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
1077 				     DMA_TO_DEVICE);
1078 			uap->dmatx.queued = false;
1079 		}
1080 
1081 		kfree(uap->dmatx.buf);
1082 		uap->using_tx_dma = false;
1083 	}
1084 
1085 	if (uap->using_rx_dma) {
1086 		dmaengine_terminate_all(uap->dmarx.chan);
1087 		/* Clean up the RX DMA */
1088 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
1089 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
1090 		if (uap->dmarx.poll_rate)
1091 			del_timer_sync(&uap->dmarx.timer);
1092 		uap->using_rx_dma = false;
1093 	}
1094 }
1095 
1096 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1097 {
1098 	return uap->using_rx_dma;
1099 }
1100 
1101 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1102 {
1103 	return uap->using_rx_dma && uap->dmarx.running;
1104 }
1105 
1106 #else
1107 /* Blank functions if the DMA engine is not available */
1108 static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
1109 {
1110 }
1111 
1112 static inline void pl011_dma_remove(struct uart_amba_port *uap)
1113 {
1114 }
1115 
1116 static inline void pl011_dma_startup(struct uart_amba_port *uap)
1117 {
1118 }
1119 
1120 static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
1121 {
1122 }
1123 
1124 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1125 {
1126 	return false;
1127 }
1128 
1129 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1130 {
1131 }
1132 
1133 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1134 {
1135 	return false;
1136 }
1137 
1138 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1139 {
1140 }
1141 
1142 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1143 {
1144 }
1145 
1146 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1147 {
1148 	return -EIO;
1149 }
1150 
1151 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1152 {
1153 	return false;
1154 }
1155 
1156 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1157 {
1158 	return false;
1159 }
1160 
1161 #define pl011_dma_flush_buffer	NULL
1162 #endif
1163 
1164 static void pl011_stop_tx(struct uart_port *port)
1165 {
1166 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1167 
1168 	uap->im &= ~UART011_TXIM;
1169 	writew(uap->im, uap->port.membase + UART011_IMSC);
1170 	pl011_dma_tx_stop(uap);
1171 }
1172 
1173 static void pl011_start_tx(struct uart_port *port)
1174 {
1175 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1176 
1177 	if (!pl011_dma_tx_start(uap)) {
1178 		uap->im |= UART011_TXIM;
1179 		writew(uap->im, uap->port.membase + UART011_IMSC);
1180 	}
1181 }
1182 
1183 static void pl011_stop_rx(struct uart_port *port)
1184 {
1185 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1186 
1187 	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1188 		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
1189 	writew(uap->im, uap->port.membase + UART011_IMSC);
1190 
1191 	pl011_dma_rx_stop(uap);
1192 }
1193 
1194 static void pl011_enable_ms(struct uart_port *port)
1195 {
1196 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1197 
1198 	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1199 	writew(uap->im, uap->port.membase + UART011_IMSC);
1200 }
1201 
1202 static void pl011_rx_chars(struct uart_amba_port *uap)
1203 __releases(&uap->port.lock)
1204 __acquires(&uap->port.lock)
1205 {
1206 	pl011_fifo_to_tty(uap);
1207 
1208 	spin_unlock(&uap->port.lock);
1209 	tty_flip_buffer_push(&uap->port.state->port);
1210 	/*
1211 	 * If we were temporarily out of DMA mode for a while,
1212 	 * attempt to switch back to DMA mode again.
1213 	 */
1214 	if (pl011_dma_rx_available(uap)) {
1215 		if (pl011_dma_rx_trigger_dma(uap)) {
1216 			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
1217 				"fall back to interrupt mode again\n");
1218 			uap->im |= UART011_RXIM;
1219 			writew(uap->im, uap->port.membase + UART011_IMSC);
1220 		} else {
1221 #ifdef CONFIG_DMA_ENGINE
1222 			/* Start Rx DMA poll */
1223 			if (uap->dmarx.poll_rate) {
1224 				uap->dmarx.last_jiffies = jiffies;
1225 				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
1226 				mod_timer(&uap->dmarx.timer,
1227 					jiffies +
1228 					msecs_to_jiffies(uap->dmarx.poll_rate));
1229 			}
1230 #endif
1231 		}
1232 	}
1233 	spin_lock(&uap->port.lock);
1234 }
1235 
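/*
 * PIO transmit path: send any pending X-char first, hand off to DMA when
 * possible, otherwise feed up to half a FIFO's worth of characters from the
 * circular buffer into the data register.
 */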
1236 static void pl011_tx_chars(struct uart_amba_port *uap)
1237 {
1238 	struct circ_buf *xmit = &uap->port.state->xmit;
1239 	int count;
1240 
1241 	if (uap->port.x_char) {
1242 		writew(uap->port.x_char, uap->port.membase + UART01x_DR);
1243 		uap->port.icount.tx++;
1244 		uap->port.x_char = 0;
1245 		return;
1246 	}
1247 	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
1248 		pl011_stop_tx(&uap->port);
1249 		return;
1250 	}
1251 
1252 	/* If we are using DMA mode, try to send some characters. */
1253 	if (pl011_dma_tx_irq(uap))
1254 		return;
1255 
1256 	count = uap->fifosize >> 1;
1257 	do {
1258 		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
1259 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1260 		uap->port.icount.tx++;
1261 		if (uart_circ_empty(xmit))
1262 			break;
1263 	} while (--count > 0);
1264 
1265 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1266 		uart_write_wakeup(&uap->port);
1267 
1268 	if (uart_circ_empty(xmit))
1269 		pl011_stop_tx(&uap->port);
1270 }
1271 
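/*
 * Compare the current modem-status flags against the previous snapshot,
 * account for and report any DCD/DSR/CTS changes, and wake up anyone
 * blocked waiting for a modem-status delta.
 */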
1272 static void pl011_modem_status(struct uart_amba_port *uap)
1273 {
1274 	unsigned int status, delta;
1275 
1276 	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1277 
1278 	delta = status ^ uap->old_status;
1279 	uap->old_status = status;
1280 
1281 	if (!delta)
1282 		return;
1283 
1284 	if (delta & UART01x_FR_DCD)
1285 		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1286 
1287 	if (delta & UART01x_FR_DSR)
1288 		uap->port.icount.dsr++;
1289 
1290 	if (delta & UART01x_FR_CTS)
1291 		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
1292 
1293 	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1294 }
1295 
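/*
 * Main interrupt handler: loop on the masked interrupt status, servicing RX
 * (PIO or DMA), modem-status and TX interrupts, bounded by
 * AMBA_ISR_PASS_LIMIT so a stuck interrupt source cannot hang the CPU.
 */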
1296 static irqreturn_t pl011_int(int irq, void *dev_id)
1297 {
1298 	struct uart_amba_port *uap = dev_id;
1299 	unsigned long flags;
1300 	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1301 	int handled = 0;
1302 	unsigned int dummy_read;
1303 
1304 	spin_lock_irqsave(&uap->port.lock, flags);
1305 	status = readw(uap->port.membase + UART011_MIS);
1306 	if (status) {
1307 		do {
1308 			if (uap->vendor->cts_event_workaround) {
1309 				/* workaround to make sure that all bits are unlocked */
1310 				writew(0x00, uap->port.membase + UART011_ICR);
1311 
1312 				/*
1313 				 * WA: introduce a 26 ns (1 UART clock) delay before W1C;
1314 				 * a single APB access incurs a 2 pclk (133.12 MHz) delay,
1315 				 * so add 2 dummy reads.
1316 				 */
1317 				dummy_read = readw(uap->port.membase + UART011_ICR);
1318 				dummy_read = readw(uap->port.membase + UART011_ICR);
1319 			}
1320 
1321 			writew(status & ~(UART011_TXIS|UART011_RTIS|
1322 					  UART011_RXIS),
1323 			       uap->port.membase + UART011_ICR);
1324 
1325 			if (status & (UART011_RTIS|UART011_RXIS)) {
1326 				if (pl011_dma_rx_running(uap))
1327 					pl011_dma_rx_irq(uap);
1328 				else
1329 					pl011_rx_chars(uap);
1330 			}
1331 			if (status & (UART011_DSRMIS|UART011_DCDMIS|
1332 				      UART011_CTSMIS|UART011_RIMIS))
1333 				pl011_modem_status(uap);
1334 			if (status & UART011_TXIS)
1335 				pl011_tx_chars(uap);
1336 
1337 			if (pass_counter-- == 0)
1338 				break;
1339 
1340 			status = readw(uap->port.membase + UART011_MIS);
1341 		} while (status != 0);
1342 		handled = 1;
1343 	}
1344 
1345 	spin_unlock_irqrestore(&uap->port.lock, flags);
1346 
1347 	return IRQ_RETVAL(handled);
1348 }
1349 
1350 static unsigned int pl011_tx_empty(struct uart_port *port)
1351 {
1352 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1353 	unsigned int status = readw(uap->port.membase + UART01x_FR);
1354 	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1355 }
1356 
1357 static unsigned int pl011_get_mctrl(struct uart_port *port)
1358 {
1359 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1360 	unsigned int result = 0;
1361 	unsigned int status = readw(uap->port.membase + UART01x_FR);
1362 
1363 #define TIOCMBIT(uartbit, tiocmbit)	\
1364 	if (status & uartbit)		\
1365 		result |= tiocmbit
1366 
1367 	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1368 	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1369 	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1370 	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1371 #undef TIOCMBIT
1372 	return result;
1373 }
1374 
1375 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1376 {
1377 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1378 	unsigned int cr;
1379 
1380 	cr = readw(uap->port.membase + UART011_CR);
1381 
1382 #define	TIOCMBIT(tiocmbit, uartbit)		\
1383 	if (mctrl & tiocmbit)		\
1384 		cr |= uartbit;		\
1385 	else				\
1386 		cr &= ~uartbit
1387 
1388 	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1389 	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1390 	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1391 	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1392 	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1393 
1394 	if (uap->autorts) {
1395 		/* We need to disable auto-RTS if we want to turn RTS off */
1396 		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1397 	}
1398 #undef TIOCMBIT
1399 
1400 	writew(cr, uap->port.membase + UART011_CR);
1401 }
1402 
1403 static void pl011_break_ctl(struct uart_port *port, int break_state)
1404 {
1405 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1406 	unsigned long flags;
1407 	unsigned int lcr_h;
1408 
1409 	spin_lock_irqsave(&uap->port.lock, flags);
1410 	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1411 	if (break_state == -1)
1412 		lcr_h |= UART01x_LCRH_BRK;
1413 	else
1414 		lcr_h &= ~UART01x_LCRH_BRK;
1415 	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1416 	spin_unlock_irqrestore(&uap->port.lock, flags);
1417 }
1418 
1419 #ifdef CONFIG_CONSOLE_POLL
1420 
1421 static void pl011_quiesce_irqs(struct uart_port *port)
1422 {
1423 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1424 	unsigned char __iomem *regs = uap->port.membase;
1425 
1426 	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
1427 	/*
1428 	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
1429 	 * we simply mask it. start_tx() will unmask it.
1430 	 *
1431 	 * Note we can race with start_tx(), and if the race happens, the
1432 	 * polling user might get another interrupt just after we clear it.
1433 	 * But it should be OK and can happen even w/o the race, e.g.
1434 	 * controller immediately got some new data and raised the IRQ.
1435 	 *
1436 	 * And whoever uses polling routines assumes that it manages the device
1437 	 * (including tx queue), so we're also fine with start_tx()'s caller
1438 	 * side.
1439 	 */
1440 	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
1441 }
1442 
1443 static int pl011_get_poll_char(struct uart_port *port)
1444 {
1445 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1446 	unsigned int status;
1447 
1448 	/*
1449 	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
1450 	 * debugger.
1451 	 */
1452 	pl011_quiesce_irqs(port);
1453 
1454 	status = readw(uap->port.membase + UART01x_FR);
1455 	if (status & UART01x_FR_RXFE)
1456 		return NO_POLL_CHAR;
1457 
1458 	return readw(uap->port.membase + UART01x_DR);
1459 }
1460 
1461 static void pl011_put_poll_char(struct uart_port *port,
1462 			 unsigned char ch)
1463 {
1464 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1465 
1466 	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1467 		barrier();
1468 
1469 	writew(ch, uap->port.membase + UART01x_DR);
1470 }
1471 
1472 #endif /* CONFIG_CONSOLE_POLL */
1473 
1474 static int pl011_hwinit(struct uart_port *port)
1475 {
1476 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1477 	int retval;
1478 
1479 	/* Optionally enable pins to be muxed in and configured */
1480 	pinctrl_pm_select_default_state(port->dev);
1481 
1482 	/*
1483 	 * Try to enable the clock producer.
1484 	 */
1485 	retval = clk_prepare_enable(uap->clk);
1486 	if (retval)
1487 		goto out;
1488 
1489 	uap->port.uartclk = clk_get_rate(uap->clk);
1490 
1491 	/* Clear pending error and receive interrupts */
1492 	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
1493 	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
1494 
1495 	/*
1496 	 * Save the interrupt enable mask, and enable RX interrupts in case
1497 	 * the interrupt is used for NMI entry.
1498 	 */
1499 	uap->im = readw(uap->port.membase + UART011_IMSC);
1500 	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
1501 
1502 	if (dev_get_platdata(uap->port.dev)) {
1503 		struct amba_pl011_data *plat;
1504 
1505 		plat = dev_get_platdata(uap->port.dev);
1506 		if (plat->init)
1507 			plat->init();
1508 	}
1509 	return 0;
1510  out:
1511 	return retval;
1512 }
1513 
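/*
 * Write the line control register.  On the ST variant LCRH is split into
 * separate RX and TX copies, so both must be updated, with a short delay
 * between the two writes.
 */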
1514 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1515 {
1516 	writew(lcr_h, uap->port.membase + uap->lcrh_rx);
1517 	if (uap->lcrh_rx != uap->lcrh_tx) {
1518 		int i;
1519 		/*
1520 		 * Wait 10 PCLKs before writing the LCRH_TX register;
1521 		 * to get this delay, write a read-only register 10 times.
1522 		 */
1523 		for (i = 0; i < 10; ++i)
1524 			writew(0xff, uap->port.membase + UART011_MIS);
1525 		writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1526 	}
1527 }
1528 
1529 static int pl011_startup(struct uart_port *port)
1530 {
1531 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1532 	unsigned int cr, lcr_h, fbrd, ibrd;
1533 	int retval;
1534 
1535 	retval = pl011_hwinit(port);
1536 	if (retval)
1537 		goto clk_dis;
1538 
1539 	writew(uap->im, uap->port.membase + UART011_IMSC);
1540 
1541 	/*
1542 	 * Allocate the IRQ
1543 	 */
1544 	retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
1545 	if (retval)
1546 		goto clk_dis;
1547 
1548 	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
1549 
1550 	/*
1551 	 * Provoke the TX FIFO interrupt into asserting, taking care to
1552 	 * preserve the baud rate and data format specified by FBRD, IBRD
1553 	 * and LCRH, as the UART may already be in use as a console.
1554 	 */
1555 	spin_lock_irq(&uap->port.lock);
1556 
1557 	fbrd = readw(uap->port.membase + UART011_FBRD);
1558 	ibrd = readw(uap->port.membase + UART011_IBRD);
1559 	lcr_h = readw(uap->port.membase + uap->lcrh_rx);
1560 
1561 	cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
1562 	writew(cr, uap->port.membase + UART011_CR);
1563 	writew(0, uap->port.membase + UART011_FBRD);
1564 	writew(1, uap->port.membase + UART011_IBRD);
1565 	pl011_write_lcr_h(uap, 0);
1566 	writew(0, uap->port.membase + UART01x_DR);
1567 	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
1568 		barrier();
1569 
1570 	writew(fbrd, uap->port.membase + UART011_FBRD);
1571 	writew(ibrd, uap->port.membase + UART011_IBRD);
1572 	pl011_write_lcr_h(uap, lcr_h);
1573 
1574 	/* restore RTS and DTR */
1575 	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1576 	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1577 	writew(cr, uap->port.membase + UART011_CR);
1578 
1579 	spin_unlock_irq(&uap->port.lock);
1580 
1581 	/*
1582 	 * initialise the old status of the modem signals
1583 	 */
1584 	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1585 
1586 	/* Startup DMA */
1587 	pl011_dma_startup(uap);
1588 
1589 	/*
1590 	 * Finally, enable interrupts - only RX timeouts when using DMA;
1591 	 * if the initial RX DMA job failed, start in interrupt mode
1592 	 * as well.
1593 	 */
1594 	spin_lock_irq(&uap->port.lock);
1595 	/* Clear out any spuriously appearing RX interrupts */
1596 	 writew(UART011_RTIS | UART011_RXIS,
1597 		uap->port.membase + UART011_ICR);
1598 	uap->im = UART011_RTIM;
1599 	if (!pl011_dma_rx_running(uap))
1600 		uap->im |= UART011_RXIM;
1601 	writew(uap->im, uap->port.membase + UART011_IMSC);
1602 	spin_unlock_irq(&uap->port.lock);
1603 
1604 	return 0;
1605 
1606  clk_dis:
1607 	clk_disable_unprepare(uap->clk);
1608 	return retval;
1609 }
1610 
1611 static void pl011_shutdown_channel(struct uart_amba_port *uap,
1612 					unsigned int lcrh)
1613 {
1614       unsigned long val;
1615 
1616       val = readw(uap->port.membase + lcrh);
1617       val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1618       writew(val, uap->port.membase + lcrh);
1619 }
1620 
1621 static void pl011_shutdown(struct uart_port *port)
1622 {
1623 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1624 	unsigned int cr;
1625 
1626 	/*
1627 	 * disable all interrupts
1628 	 */
1629 	spin_lock_irq(&uap->port.lock);
1630 	uap->im = 0;
1631 	writew(uap->im, uap->port.membase + UART011_IMSC);
1632 	writew(0xffff, uap->port.membase + UART011_ICR);
1633 	spin_unlock_irq(&uap->port.lock);
1634 
1635 	pl011_dma_shutdown(uap);
1636 
1637 	/*
1638 	 * Free the interrupt
1639 	 */
1640 	free_irq(uap->port.irq, uap);
1641 
1642 	/*
1643 	 * Disable the port. It should not disable RTS and DTR.
1644 	 * Also the RTS and DTR state should be preserved so it
1645 	 * can be restored during startup().
1646 	 */
1648 	uap->autorts = false;
1649 	spin_lock_irq(&uap->port.lock);
1650 	cr = readw(uap->port.membase + UART011_CR);
1651 	uap->old_cr = cr;
1652 	cr &= UART011_CR_RTS | UART011_CR_DTR;
1653 	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1654 	writew(cr, uap->port.membase + UART011_CR);
1655 	spin_unlock_irq(&uap->port.lock);
1656 
1657 	/*
1658 	 * disable break condition and fifos
1659 	 */
1660 	pl011_shutdown_channel(uap, uap->lcrh_rx);
1661 	if (uap->lcrh_rx != uap->lcrh_tx)
1662 		pl011_shutdown_channel(uap, uap->lcrh_tx);
1663 
1664 	/*
1665 	 * Shut down the clock producer
1666 	 */
1667 	clk_disable_unprepare(uap->clk);
1668 	/* Optionally let pins go into sleep states */
1669 	pinctrl_pm_select_sleep_state(port->dev);
1670 
1671 	if (dev_get_platdata(uap->port.dev)) {
1672 		struct amba_pl011_data *plat;
1673 
1674 		plat = dev_get_platdata(uap->port.dev);
1675 		if (plat->exit)
1676 			plat->exit();
1677 	}
1678 
1679 }
1680 
1681 static void
1682 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1683 		     struct ktermios *old)
1684 {
1685 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1686 	unsigned int lcr_h, old_cr;
1687 	unsigned long flags;
1688 	unsigned int baud, quot, clkdiv;
1689 
1690 	if (uap->vendor->oversampling)
1691 		clkdiv = 8;
1692 	else
1693 		clkdiv = 16;
1694 
1695 	/*
1696 	 * Ask the core to calculate the divisor for us.
1697 	 */
1698 	baud = uart_get_baud_rate(port, termios, old, 0,
1699 				  port->uartclk / clkdiv);
1700 #ifdef CONFIG_DMA_ENGINE
1701 	/*
1702 	 * Adjust RX DMA polling rate with baud rate if not specified.
1703 	 */
1704 	if (uap->dmarx.auto_poll_rate)
1705 		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
1706 #endif
1707 
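	/*
	 * The baud divisor is held as an integer part (IBRD) plus a 6-bit
	 * fraction (FBRD), i.e. in 64ths: quot = uartclk * 4 / baud for the
	 * standard 16x oversampling, or uartclk * 8 / baud when the ST
	 * oversampling-by-8 mode is used for very high baud rates.
	 */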
1708 	if (baud > port->uartclk/16)
1709 		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
1710 	else
1711 		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
1712 
1713 	switch (termios->c_cflag & CSIZE) {
1714 	case CS5:
1715 		lcr_h = UART01x_LCRH_WLEN_5;
1716 		break;
1717 	case CS6:
1718 		lcr_h = UART01x_LCRH_WLEN_6;
1719 		break;
1720 	case CS7:
1721 		lcr_h = UART01x_LCRH_WLEN_7;
1722 		break;
1723 	default:	/* CS8 */
1724 		lcr_h = UART01x_LCRH_WLEN_8;
1725 		break;
1726 	}
1727 	if (termios->c_cflag & CSTOPB)
1728 		lcr_h |= UART01x_LCRH_STP2;
1729 	if (termios->c_cflag & PARENB) {
1730 		lcr_h |= UART01x_LCRH_PEN;
1731 		if (!(termios->c_cflag & PARODD))
1732 			lcr_h |= UART01x_LCRH_EPS;
1733 	}
1734 	if (uap->fifosize > 1)
1735 		lcr_h |= UART01x_LCRH_FEN;
1736 
1737 	spin_lock_irqsave(&port->lock, flags);
1738 
1739 	/*
1740 	 * Update the per-port timeout.
1741 	 */
1742 	uart_update_timeout(port, termios->c_cflag, baud);
1743 
1744 	port->read_status_mask = UART011_DR_OE | 255;
1745 	if (termios->c_iflag & INPCK)
1746 		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1747 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1748 		port->read_status_mask |= UART011_DR_BE;
1749 
1750 	/*
1751 	 * Characters to ignore
1752 	 */
1753 	port->ignore_status_mask = 0;
1754 	if (termios->c_iflag & IGNPAR)
1755 		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1756 	if (termios->c_iflag & IGNBRK) {
1757 		port->ignore_status_mask |= UART011_DR_BE;
1758 		/*
1759 		 * If we're ignoring parity and break indicators,
1760 		 * ignore overruns too (for real raw support).
1761 		 */
1762 		if (termios->c_iflag & IGNPAR)
1763 			port->ignore_status_mask |= UART011_DR_OE;
1764 	}
1765 
1766 	/*
1767 	 * Ignore all characters if CREAD is not set.
1768 	 */
1769 	if ((termios->c_cflag & CREAD) == 0)
1770 		port->ignore_status_mask |= UART_DUMMY_DR_RX;
1771 
1772 	if (UART_ENABLE_MS(port, termios->c_cflag))
1773 		pl011_enable_ms(port);
1774 
1775 	/* first, disable everything */
1776 	old_cr = readw(port->membase + UART011_CR);
1777 	writew(0, port->membase + UART011_CR);
1778 
1779 	if (termios->c_cflag & CRTSCTS) {
1780 		if (old_cr & UART011_CR_RTS)
1781 			old_cr |= UART011_CR_RTSEN;
1782 
1783 		old_cr |= UART011_CR_CTSEN;
1784 		uap->autorts = true;
1785 	} else {
1786 		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
1787 		uap->autorts = false;
1788 	}
1789 
1790 	if (uap->vendor->oversampling) {
1791 		if (baud > port->uartclk / 16)
1792 			old_cr |= ST_UART011_CR_OVSFACT;
1793 		else
1794 			old_cr &= ~ST_UART011_CR_OVSFACT;
1795 	}
1796 
1797 	/*
1798 	 * Workaround for the ST Micro oversampling variants to
1799 	 * increase the bitrate slightly, by lowering the divisor,
1800 	 * to avoid delayed sampling of start bit at high speeds,
1801 	 * else we see data corruption.
1802 	 */
1803 	if (uap->vendor->oversampling) {
1804 		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
1805 			quot -= 1;
1806 		else if ((baud > 3250000) && (quot > 2))
1807 			quot -= 2;
1808 	}
1809 	/* Set baud rate */
1810 	writew(quot & 0x3f, port->membase + UART011_FBRD);
1811 	writew(quot >> 6, port->membase + UART011_IBRD);
1812 
1813 	/*
1814 	 * ----------v----------v----------v----------v-----
1815 	 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
1816 	 * UART011_FBRD & UART011_IBRD.
1817 	 * ----------^----------^----------^----------^-----
1818 	 */
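	/*
	 * The new divisor only takes effect once LCRH is written: IBRD,
	 * FBRD and LCRH are latched together on the LCRH write strobe,
	 * which is why lcr_h goes out last here.
	 */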
1819 	pl011_write_lcr_h(uap, lcr_h);
1820 	writew(old_cr, port->membase + UART011_CR);
1821 
1822 	spin_unlock_irqrestore(&port->lock, flags);
1823 }
1824 
1825 static const char *pl011_type(struct uart_port *port)
1826 {
1827 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1828 	return uap->port.type == PORT_AMBA ? uap->type : NULL;
1829 }
1830 
1831 /*
1832  * Release the memory region(s) being used by 'port'
1833  */
1834 static void pl011_release_port(struct uart_port *port)
1835 {
1836 	release_mem_region(port->mapbase, SZ_4K);
1837 }
1838 
1839 /*
1840  * Request the memory region(s) being used by 'port'
1841  */
1842 static int pl011_request_port(struct uart_port *port)
1843 {
1844 	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1845 			!= NULL ? 0 : -EBUSY;
1846 }
1847 
1848 /*
1849  * Configure/autoconfigure the port.
1850  */
1851 static void pl011_config_port(struct uart_port *port, int flags)
1852 {
1853 	if (flags & UART_CONFIG_TYPE) {
1854 		port->type = PORT_AMBA;
1855 		pl011_request_port(port);
1856 	}
1857 }
1858 
1859 /*
1860  * verify the new serial_struct (for TIOCSSERIAL).
1861  */
1862 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
1863 {
1864 	int ret = 0;
1865 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
1866 		ret = -EINVAL;
1867 	if (ser->irq < 0 || ser->irq >= nr_irqs)
1868 		ret = -EINVAL;
1869 	if (ser->baud_base < 9600)
1870 		ret = -EINVAL;
1871 	return ret;
1872 }
1873 
1874 static struct uart_ops amba_pl011_pops = {
1875 	.tx_empty	= pl011_tx_empty,
1876 	.set_mctrl	= pl011_set_mctrl,
1877 	.get_mctrl	= pl011_get_mctrl,
1878 	.stop_tx	= pl011_stop_tx,
1879 	.start_tx	= pl011_start_tx,
1880 	.stop_rx	= pl011_stop_rx,
1881 	.enable_ms	= pl011_enable_ms,
1882 	.break_ctl	= pl011_break_ctl,
1883 	.startup	= pl011_startup,
1884 	.shutdown	= pl011_shutdown,
1885 	.flush_buffer	= pl011_dma_flush_buffer,
1886 	.set_termios	= pl011_set_termios,
1887 	.type		= pl011_type,
1888 	.release_port	= pl011_release_port,
1889 	.request_port	= pl011_request_port,
1890 	.config_port	= pl011_config_port,
1891 	.verify_port	= pl011_verify_port,
1892 #ifdef CONFIG_CONSOLE_POLL
1893 	.poll_init     = pl011_hwinit,
1894 	.poll_get_char = pl011_get_poll_char,
1895 	.poll_put_char = pl011_put_poll_char,
1896 #endif
1897 };
1898 
1899 static struct uart_amba_port *amba_ports[UART_NR];
1900 
1901 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
1902 
1903 static void pl011_console_putchar(struct uart_port *port, int ch)
1904 {
1905 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
1906 
1907 	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1908 		barrier();
1909 	writew(ch, uap->port.membase + UART01x_DR);
1910 }
1911 
1912 static void
1913 pl011_console_write(struct console *co, const char *s, unsigned int count)
1914 {
1915 	struct uart_amba_port *uap = amba_ports[co->index];
1916 	unsigned int status, old_cr, new_cr;
1917 	unsigned long flags;
1918 	int locked = 1;
1919 
1920 	clk_enable(uap->clk);
1921 
1922 	local_irq_save(flags);
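	/*
	 * uap->port.lock may already be held: by us while handling a sysrq
	 * character, or by whatever context an oops interrupted.  Skip or
	 * only trylock in those cases rather than deadlocking the console.
	 */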
1923 	if (uap->port.sysrq)
1924 		locked = 0;
1925 	else if (oops_in_progress)
1926 		locked = spin_trylock(&uap->port.lock);
1927 	else
1928 		spin_lock(&uap->port.lock);
1929 
1930 	/*
1931 	 *	First save the CR then disable CTS flow control and force the UART and TX on
1932 	 */
1933 	old_cr = readw(uap->port.membase + UART011_CR);
1934 	new_cr = old_cr & ~UART011_CR_CTSEN;
1935 	new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1936 	writew(new_cr, uap->port.membase + UART011_CR);
1937 
1938 	uart_console_write(&uap->port, s, count, pl011_console_putchar);
1939 
1940 	/*
1941 	 *	Finally, wait for transmitter to become empty
1942 	 *	and restore the TCR
1943 	 *	and restore the CR
1944 	do {
1945 		status = readw(uap->port.membase + UART01x_FR);
1946 	} while (status & UART01x_FR_BUSY);
1947 	writew(old_cr, uap->port.membase + UART011_CR);
1948 
1949 	if (locked)
1950 		spin_unlock(&uap->port.lock);
1951 	local_irq_restore(flags);
1952 
1953 	clk_disable(uap->clk);
1954 }
1955 
1956 static void __init
1957 pl011_console_get_options(struct uart_amba_port *uap, int *baud,
1958 			     int *parity, int *bits)
1959 {
1960 	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
1961 		unsigned int lcr_h, ibrd, fbrd;
1962 
1963 		lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1964 
1965 		*parity = 'n';
1966 		if (lcr_h & UART01x_LCRH_PEN) {
1967 			if (lcr_h & UART01x_LCRH_EPS)
1968 				*parity = 'e';
1969 			else
1970 				*parity = 'o';
1971 		}
1972 
1973 		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
1974 			*bits = 7;
1975 		else
1976 			*bits = 8;
1977 
1978 		ibrd = readw(uap->port.membase + UART011_IBRD);
1979 		fbrd = readw(uap->port.membase + UART011_FBRD);
1980 
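		/*
		 * Invert the divisor arithmetic from pl011_set_termios():
		 * 64 * ibrd + fbrd is the 16.6 fixed-point divisor scaled
		 * by 64, so baud = uartclk / (16 * divisor)
		 *               = uartclk * 4 / (64 * ibrd + fbrd).
		 */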
1981 		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
1982 
1983 		if (uap->vendor->oversampling) {
1984 			if (readw(uap->port.membase + UART011_CR)
1985 				  & ST_UART011_CR_OVSFACT)
1986 				*baud *= 2;
1987 		}
1988 	}
1989 }
1990 
1991 static int __init pl011_console_setup(struct console *co, char *options)
1992 {
1993 	struct uart_amba_port *uap;
1994 	int baud = 38400;
1995 	int bits = 8;
1996 	int parity = 'n';
1997 	int flow = 'n';
1998 	int ret;
1999 
2000 	/*
2001 	 * Check whether an invalid uart number has been specified; if so,
2002 	 * fall back to port 0.  Fail with -ENODEV if that port has not
2003 	 * been probed and so cannot provide console support.
2004 	 */
2005 	if (co->index >= UART_NR)
2006 		co->index = 0;
2007 	uap = amba_ports[co->index];
2008 	if (!uap)
2009 		return -ENODEV;
2010 
2011 	/* Allow pins to be muxed in and configured */
2012 	pinctrl_pm_select_default_state(uap->port.dev);
2013 
2014 	ret = clk_prepare(uap->clk);
2015 	if (ret)
2016 		return ret;
2017 
2018 	if (dev_get_platdata(uap->port.dev)) {
2019 		struct amba_pl011_data *plat;
2020 
2021 		plat = dev_get_platdata(uap->port.dev);
2022 		if (plat->init)
2023 			plat->init();
2024 	}
2025 
2026 	uap->port.uartclk = clk_get_rate(uap->clk);
2027 
2028 	if (options)
2029 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2030 	else
2031 		pl011_console_get_options(uap, &baud, &parity, &bits);
2032 
2033 	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
2034 }
2035 
2036 static struct uart_driver amba_reg;
2037 static struct console amba_console = {
2038 	.name		= "ttyAMA",
2039 	.write		= pl011_console_write,
2040 	.device		= uart_console_device,
2041 	.setup		= pl011_console_setup,
2042 	.flags		= CON_PRINTBUFFER,
2043 	.index		= -1,
2044 	.data		= &amba_reg,
2045 };
2046 
2047 #define AMBA_CONSOLE	(&amba_console)
2048 
2049 static void pl011_putc(struct uart_port *port, int c)
2050 {
2051 	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2052 		;
2053 	writeb(c, port->membase + UART01x_DR);
2054 	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
2055 		;
2056 }
2057 
2058 static void pl011_early_write(struct console *con, const char *s, unsigned n)
2059 {
2060 	struct earlycon_device *dev = con->data;
2061 
2062 	uart_console_write(&dev->port, s, n, pl011_putc);
2063 }
2064 
2065 static int __init pl011_early_console_setup(struct earlycon_device *device,
2066 					    const char *opt)
2067 {
2068 	if (!device->port.membase)
2069 		return -ENODEV;
2070 
2071 	device->con->write = pl011_early_write;
2072 	return 0;
2073 }
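/*
 * This registers a polled "pl011" early console, selectable with
 * earlycon=pl011,<mmio-address> on the command line or matched from the
 * device tree via the "arm,pl011" compatible.
 */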
2074 EARLYCON_DECLARE(pl011, pl011_early_console_setup);
2075 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2076 
2077 #else
2078 #define AMBA_CONSOLE	NULL
2079 #endif
2080 
2081 static struct uart_driver amba_reg = {
2082 	.owner			= THIS_MODULE,
2083 	.driver_name		= "ttyAMA",
2084 	.dev_name		= "ttyAMA",
2085 	.major			= SERIAL_AMBA_MAJOR,
2086 	.minor			= SERIAL_AMBA_MINOR,
2087 	.nr			= UART_NR,
2088 	.cons			= AMBA_CONSOLE,
2089 };
2090 
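/*
 * Honour a "serialN" alias from the device tree when one exists, so that
 * ttyAMA numbering follows the aliases rather than probe order.
 */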
2091 static int pl011_probe_dt_alias(int index, struct device *dev)
2092 {
2093 	struct device_node *np;
2094 	static bool seen_dev_with_alias = false;
2095 	static bool seen_dev_without_alias = false;
2096 	int ret = index;
2097 
2098 	if (!IS_ENABLED(CONFIG_OF))
2099 		return ret;
2100 
2101 	np = dev->of_node;
2102 	if (!np)
2103 		return ret;
2104 
2105 	ret = of_alias_get_id(np, "serial");
2106 	if (IS_ERR_VALUE(ret)) {
2107 		seen_dev_without_alias = true;
2108 		ret = index;
2109 	} else {
2110 		seen_dev_with_alias = true;
2111 		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2112 			dev_warn(dev, "requested serial port %d not available.\n", ret);
2113 			ret = index;
2114 		}
2115 	}
2116 
2117 	if (seen_dev_with_alias && seen_dev_without_alias)
2118 		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2119 
2120 	return ret;
2121 }
2122 
2123 static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2124 {
2125 	struct uart_amba_port *uap;
2126 	struct vendor_data *vendor = id->data;
2127 	void __iomem *base;
2128 	int i, ret;
2129 
2130 	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2131 		if (amba_ports[i] == NULL)
2132 			break;
2133 
2134 	if (i == ARRAY_SIZE(amba_ports)) {
2135 		ret = -EBUSY;
2136 		goto out;
2137 	}
2138 
2139 	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2140 			   GFP_KERNEL);
2141 	if (uap == NULL) {
2142 		ret = -ENOMEM;
2143 		goto out;
2144 	}
2145 
2146 	i = pl011_probe_dt_alias(i, &dev->dev);
2147 
2148 	base = devm_ioremap(&dev->dev, dev->res.start,
2149 			    resource_size(&dev->res));
2150 	if (!base) {
2151 		ret = -ENOMEM;
2152 		goto out;
2153 	}
2154 
2155 	uap->clk = devm_clk_get(&dev->dev, NULL);
2156 	if (IS_ERR(uap->clk)) {
2157 		ret = PTR_ERR(uap->clk);
2158 		goto out;
2159 	}
2160 
2161 	uap->vendor = vendor;
2162 	uap->lcrh_rx = vendor->lcrh_rx;
2163 	uap->lcrh_tx = vendor->lcrh_tx;
2164 	uap->old_cr = 0;
2165 	uap->fifosize = vendor->get_fifosize(dev);
2166 	uap->port.dev = &dev->dev;
2167 	uap->port.mapbase = dev->res.start;
2168 	uap->port.membase = base;
2169 	uap->port.iotype = UPIO_MEM;
2170 	uap->port.irq = dev->irq[0];
2171 	uap->port.fifosize = uap->fifosize;
2172 	uap->port.ops = &amba_pl011_pops;
2173 	uap->port.flags = UPF_BOOT_AUTOCONF;
2174 	uap->port.line = i;
2175 	pl011_dma_probe(&dev->dev, uap);
2176 
2177 	/* Ensure interrupts from this UART are masked and cleared */
2178 	writew(0, uap->port.membase + UART011_IMSC);
2179 	writew(0xffff, uap->port.membase + UART011_ICR);
2180 
2181 	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2182 
2183 	amba_ports[i] = uap;
2184 
2185 	amba_set_drvdata(dev, uap);
2186 
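	/*
	 * Register the uart_driver lazily, on the first port that probes
	 * successfully; pl011_remove() unregisters it again once the last
	 * port is gone.
	 */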
2187 	if (!amba_reg.state) {
2188 		ret = uart_register_driver(&amba_reg);
2189 		if (ret < 0) {
2190 			pr_err("Failed to register AMBA-PL011 driver\n");
2191 			return ret;
2192 		}
2193 	}
2194 
2195 	ret = uart_add_one_port(&amba_reg, &uap->port);
2196 	if (ret) {
2197 		amba_ports[i] = NULL;
2198 		uart_unregister_driver(&amba_reg);
2199 		pl011_dma_remove(uap);
2200 	}
2201  out:
2202 	return ret;
2203 }
2204 
2205 static int pl011_remove(struct amba_device *dev)
2206 {
2207 	struct uart_amba_port *uap = amba_get_drvdata(dev);
2208 	bool busy = false;
2209 	int i;
2210 
2211 	uart_remove_one_port(&amba_reg, &uap->port);
2212 
2213 	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2214 		if (amba_ports[i] == uap)
2215 			amba_ports[i] = NULL;
2216 		else if (amba_ports[i])
2217 			busy = true;
2218 
2219 	pl011_dma_remove(uap);
2220 	if (!busy)
2221 		uart_unregister_driver(&amba_reg);
2222 	return 0;
2223 }
2224 
2225 #ifdef CONFIG_PM_SLEEP
2226 static int pl011_suspend(struct device *dev)
2227 {
2228 	struct uart_amba_port *uap = dev_get_drvdata(dev);
2229 
2230 	if (!uap)
2231 		return -EINVAL;
2232 
2233 	return uart_suspend_port(&amba_reg, &uap->port);
2234 }
2235 
2236 static int pl011_resume(struct device *dev)
2237 {
2238 	struct uart_amba_port *uap = dev_get_drvdata(dev);
2239 
2240 	if (!uap)
2241 		return -EINVAL;
2242 
2243 	return uart_resume_port(&amba_reg, &uap->port);
2244 }
2245 #endif
2246 
2247 static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2248 
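/*
 * Match on the AMBA/PrimeCell peripheral ID: the low twelve bits are the
 * part number (0x011 for the ARM PL011) and the next byte the designer
 * (0x41, ARM); the ST variant carries its own ID and mask.
 */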
2249 static struct amba_id pl011_ids[] = {
2250 	{
2251 		.id	= 0x00041011,
2252 		.mask	= 0x000fffff,
2253 		.data	= &vendor_arm,
2254 	},
2255 	{
2256 		.id	= 0x00380802,
2257 		.mask	= 0x00ffffff,
2258 		.data	= &vendor_st,
2259 	},
2260 	{ 0, 0 },
2261 };
2262 
2263 MODULE_DEVICE_TABLE(amba, pl011_ids);
2264 
2265 static struct amba_driver pl011_driver = {
2266 	.drv = {
2267 		.name	= "uart-pl011",
2268 		.pm	= &pl011_dev_pm_ops,
2269 	},
2270 	.id_table	= pl011_ids,
2271 	.probe		= pl011_probe,
2272 	.remove		= pl011_remove,
2273 };
2274 
2275 static int __init pl011_init(void)
2276 {
2277 	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2278 
2279 	return amba_driver_register(&pl011_driver);
2280 }
2281 
2282 static void __exit pl011_exit(void)
2283 {
2284 	amba_driver_unregister(&pl011_driver);
2285 }
2286 
2287 /*
2288  * While this can be a module, if built-in it's most likely the console,
2289  * so let's leave module_exit but move module_init to an earlier place.
2290  */
2291 arch_initcall(pl011_init);
2292 module_exit(pl011_exit);
2293 
2294 MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2295 MODULE_DESCRIPTION("ARM AMBA serial port driver");
2296 MODULE_LICENSE("GPL");
2297