xref: /openbmc/linux/drivers/tty/serial/sh-sci.c (revision 878fbb91)
1 /*
2  * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
3  *
4  *  Copyright (C) 2002 - 2011  Paul Mundt
5  *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
6  *
7  * based off of the old drivers/char/sh-sci.c by:
8  *
9  *   Copyright (C) 1999, 2000  Niibe Yutaka
10  *   Copyright (C) 2000  Sugioka Toshinobu
11  *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
12  *   Modified to support SecureEdge. David McCullough (2002)
13  *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
14  *   Removed SH7300 support (Jul 2007).
15  *
16  * This file is subject to the terms and conditions of the GNU General Public
17  * License.  See the file "COPYING" in the main directory of this archive
18  * for more details.
19  */
20 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
21 #define SUPPORT_SYSRQ
22 #endif
23 
24 #undef DEBUG
25 
26 #include <linux/clk.h>
27 #include <linux/console.h>
28 #include <linux/ctype.h>
29 #include <linux/cpufreq.h>
30 #include <linux/delay.h>
31 #include <linux/dmaengine.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/err.h>
34 #include <linux/errno.h>
35 #include <linux/init.h>
36 #include <linux/interrupt.h>
37 #include <linux/ioport.h>
38 #include <linux/major.h>
39 #include <linux/module.h>
40 #include <linux/mm.h>
41 #include <linux/notifier.h>
42 #include <linux/platform_device.h>
43 #include <linux/pm_runtime.h>
44 #include <linux/scatterlist.h>
45 #include <linux/serial.h>
46 #include <linux/serial_sci.h>
47 #include <linux/sh_dma.h>
48 #include <linux/slab.h>
49 #include <linux/string.h>
50 #include <linux/sysrq.h>
51 #include <linux/timer.h>
52 #include <linux/tty.h>
53 #include <linux/tty_flip.h>
54 
55 #ifdef CONFIG_SUPERH
56 #include <asm/sh_bios.h>
57 #endif
58 
59 #include "sh-sci.h"
60 
61 struct sci_port {
62 	struct uart_port	port;
63 
64 	/* Platform configuration */
65 	struct plat_sci_port	*cfg;
66 	int			overrun_bit;
67 	unsigned int		error_mask;
68 	unsigned int		sampling_rate;
69 
70 
71 	/* Break timer */
72 	struct timer_list	break_timer;
73 	int			break_flag;
74 
75 	/* Interface clock */
76 	struct clk		*iclk;
77 	/* Function clock */
78 	struct clk		*fclk;
79 
80 	int			irqs[SCIx_NR_IRQS];
81 	char			*irqstr[SCIx_NR_IRQS];
82 
83 	struct dma_chan			*chan_tx;
84 	struct dma_chan			*chan_rx;
85 
86 #ifdef CONFIG_SERIAL_SH_SCI_DMA
87 	struct dma_async_tx_descriptor	*desc_tx;
88 	struct dma_async_tx_descriptor	*desc_rx[2];
89 	dma_cookie_t			cookie_tx;
90 	dma_cookie_t			cookie_rx[2];
91 	dma_cookie_t			active_rx;
92 	struct scatterlist		sg_tx;
93 	unsigned int			sg_len_tx;
94 	struct scatterlist		sg_rx[2];
95 	size_t				buf_len_rx;
96 	struct sh_dmae_slave		param_tx;
97 	struct sh_dmae_slave		param_rx;
98 	struct work_struct		work_tx;
99 	struct work_struct		work_rx;
100 	struct timer_list		rx_timer;
101 	unsigned int			rx_timeout;
102 #endif
103 
104 	struct notifier_block		freq_transition;
105 };
106 
107 /* Function prototypes */
108 static void sci_start_tx(struct uart_port *port);
109 static void sci_stop_tx(struct uart_port *port);
110 static void sci_start_rx(struct uart_port *port);
111 
112 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
113 
114 static struct sci_port sci_ports[SCI_NPORTS];
115 static struct uart_driver sci_uart_driver;
116 
117 static inline struct sci_port *
118 to_sci_port(struct uart_port *uart)
119 {
120 	return container_of(uart, struct sci_port, port);
121 }
122 
123 struct plat_sci_reg {
124 	u8 offset, size;
125 };
126 
127 /* Helper for invalidating specific entries of an inherited map. */
128 #define sci_reg_invalid	{ .offset = 0, .size = 0 }
129 
130 static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
131 	[SCIx_PROBE_REGTYPE] = {
132 		[0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
133 	},
134 
135 	/*
136 	 * Common SCI definitions, dependent on the port's regshift
137 	 * value.
138 	 */
139 	[SCIx_SCI_REGTYPE] = {
140 		[SCSMR]		= { 0x00,  8 },
141 		[SCBRR]		= { 0x01,  8 },
142 		[SCSCR]		= { 0x02,  8 },
143 		[SCxTDR]	= { 0x03,  8 },
144 		[SCxSR]		= { 0x04,  8 },
145 		[SCxRDR]	= { 0x05,  8 },
146 		[SCFCR]		= sci_reg_invalid,
147 		[SCFDR]		= sci_reg_invalid,
148 		[SCTFDR]	= sci_reg_invalid,
149 		[SCRFDR]	= sci_reg_invalid,
150 		[SCSPTR]	= sci_reg_invalid,
151 		[SCLSR]		= sci_reg_invalid,
152 		[HSSRR]		= sci_reg_invalid,
153 	},
154 
155 	/*
156 	 * Common definitions for legacy IrDA ports, dependent on
157 	 * regshift value.
158 	 */
159 	[SCIx_IRDA_REGTYPE] = {
160 		[SCSMR]		= { 0x00,  8 },
161 		[SCBRR]		= { 0x01,  8 },
162 		[SCSCR]		= { 0x02,  8 },
163 		[SCxTDR]	= { 0x03,  8 },
164 		[SCxSR]		= { 0x04,  8 },
165 		[SCxRDR]	= { 0x05,  8 },
166 		[SCFCR]		= { 0x06,  8 },
167 		[SCFDR]		= { 0x07, 16 },
168 		[SCTFDR]	= sci_reg_invalid,
169 		[SCRFDR]	= sci_reg_invalid,
170 		[SCSPTR]	= sci_reg_invalid,
171 		[SCLSR]		= sci_reg_invalid,
172 		[HSSRR]		= sci_reg_invalid,
173 	},
174 
175 	/*
176 	 * Common SCIFA definitions.
177 	 */
178 	[SCIx_SCIFA_REGTYPE] = {
179 		[SCSMR]		= { 0x00, 16 },
180 		[SCBRR]		= { 0x04,  8 },
181 		[SCSCR]		= { 0x08, 16 },
182 		[SCxTDR]	= { 0x20,  8 },
183 		[SCxSR]		= { 0x14, 16 },
184 		[SCxRDR]	= { 0x24,  8 },
185 		[SCFCR]		= { 0x18, 16 },
186 		[SCFDR]		= { 0x1c, 16 },
187 		[SCTFDR]	= sci_reg_invalid,
188 		[SCRFDR]	= sci_reg_invalid,
189 		[SCSPTR]	= sci_reg_invalid,
190 		[SCLSR]		= sci_reg_invalid,
191 		[HSSRR]		= sci_reg_invalid,
192 	},
193 
194 	/*
195 	 * Common SCIFB definitions.
196 	 */
197 	[SCIx_SCIFB_REGTYPE] = {
198 		[SCSMR]		= { 0x00, 16 },
199 		[SCBRR]		= { 0x04,  8 },
200 		[SCSCR]		= { 0x08, 16 },
201 		[SCxTDR]	= { 0x40,  8 },
202 		[SCxSR]		= { 0x14, 16 },
203 		[SCxRDR]	= { 0x60,  8 },
204 		[SCFCR]		= { 0x18, 16 },
205 		[SCFDR]		= sci_reg_invalid,
206 		[SCTFDR]	= { 0x38, 16 },
207 		[SCRFDR]	= { 0x3c, 16 },
208 		[SCSPTR]	= sci_reg_invalid,
209 		[SCLSR]		= sci_reg_invalid,
210 		[HSSRR]		= sci_reg_invalid,
211 	},
212 
213 	/*
214 	 * Common SH-2(A) SCIF definitions for ports with FIFO data
215 	 * count registers.
216 	 */
217 	[SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
218 		[SCSMR]		= { 0x00, 16 },
219 		[SCBRR]		= { 0x04,  8 },
220 		[SCSCR]		= { 0x08, 16 },
221 		[SCxTDR]	= { 0x0c,  8 },
222 		[SCxSR]		= { 0x10, 16 },
223 		[SCxRDR]	= { 0x14,  8 },
224 		[SCFCR]		= { 0x18, 16 },
225 		[SCFDR]		= { 0x1c, 16 },
226 		[SCTFDR]	= sci_reg_invalid,
227 		[SCRFDR]	= sci_reg_invalid,
228 		[SCSPTR]	= { 0x20, 16 },
229 		[SCLSR]		= { 0x24, 16 },
230 		[HSSRR]		= sci_reg_invalid,
231 	},
232 
233 	/*
234 	 * Common SH-3 SCIF definitions.
235 	 */
236 	[SCIx_SH3_SCIF_REGTYPE] = {
237 		[SCSMR]		= { 0x00,  8 },
238 		[SCBRR]		= { 0x02,  8 },
239 		[SCSCR]		= { 0x04,  8 },
240 		[SCxTDR]	= { 0x06,  8 },
241 		[SCxSR]		= { 0x08, 16 },
242 		[SCxRDR]	= { 0x0a,  8 },
243 		[SCFCR]		= { 0x0c,  8 },
244 		[SCFDR]		= { 0x0e, 16 },
245 		[SCTFDR]	= sci_reg_invalid,
246 		[SCRFDR]	= sci_reg_invalid,
247 		[SCSPTR]	= sci_reg_invalid,
248 		[SCLSR]		= sci_reg_invalid,
249 		[HSSRR]		= sci_reg_invalid,
250 	},
251 
252 	/*
253 	 * Common SH-4(A) SCIF(B) definitions.
254 	 */
255 	[SCIx_SH4_SCIF_REGTYPE] = {
256 		[SCSMR]		= { 0x00, 16 },
257 		[SCBRR]		= { 0x04,  8 },
258 		[SCSCR]		= { 0x08, 16 },
259 		[SCxTDR]	= { 0x0c,  8 },
260 		[SCxSR]		= { 0x10, 16 },
261 		[SCxRDR]	= { 0x14,  8 },
262 		[SCFCR]		= { 0x18, 16 },
263 		[SCFDR]		= { 0x1c, 16 },
264 		[SCTFDR]	= sci_reg_invalid,
265 		[SCRFDR]	= sci_reg_invalid,
266 		[SCSPTR]	= { 0x20, 16 },
267 		[SCLSR]		= { 0x24, 16 },
268 		[HSSRR]		= sci_reg_invalid,
269 	},
270 
271 	/*
272 	 * Common HSCIF definitions.
273 	 */
274 	[SCIx_HSCIF_REGTYPE] = {
275 		[SCSMR]		= { 0x00, 16 },
276 		[SCBRR]		= { 0x04,  8 },
277 		[SCSCR]		= { 0x08, 16 },
278 		[SCxTDR]	= { 0x0c,  8 },
279 		[SCxSR]		= { 0x10, 16 },
280 		[SCxRDR]	= { 0x14,  8 },
281 		[SCFCR]		= { 0x18, 16 },
282 		[SCFDR]		= { 0x1c, 16 },
283 		[SCTFDR]	= sci_reg_invalid,
284 		[SCRFDR]	= sci_reg_invalid,
285 		[SCSPTR]	= { 0x20, 16 },
286 		[SCLSR]		= { 0x24, 16 },
287 		[HSSRR]		= { 0x40, 16 },
288 	},
289 
290 	/*
291 	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
292 	 * register.
293 	 */
294 	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
295 		[SCSMR]		= { 0x00, 16 },
296 		[SCBRR]		= { 0x04,  8 },
297 		[SCSCR]		= { 0x08, 16 },
298 		[SCxTDR]	= { 0x0c,  8 },
299 		[SCxSR]		= { 0x10, 16 },
300 		[SCxRDR]	= { 0x14,  8 },
301 		[SCFCR]		= { 0x18, 16 },
302 		[SCFDR]		= { 0x1c, 16 },
303 		[SCTFDR]	= sci_reg_invalid,
304 		[SCRFDR]	= sci_reg_invalid,
305 		[SCSPTR]	= sci_reg_invalid,
306 		[SCLSR]		= { 0x24, 16 },
307 		[HSSRR]		= sci_reg_invalid,
308 	},
309 
310 	/*
311 	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
312 	 * count registers.
313 	 */
314 	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
315 		[SCSMR]		= { 0x00, 16 },
316 		[SCBRR]		= { 0x04,  8 },
317 		[SCSCR]		= { 0x08, 16 },
318 		[SCxTDR]	= { 0x0c,  8 },
319 		[SCxSR]		= { 0x10, 16 },
320 		[SCxRDR]	= { 0x14,  8 },
321 		[SCFCR]		= { 0x18, 16 },
322 		[SCFDR]		= { 0x1c, 16 },
323 		[SCTFDR]	= { 0x1c, 16 },	/* aliased to SCFDR */
324 		[SCRFDR]	= { 0x20, 16 },
325 		[SCSPTR]	= { 0x24, 16 },
326 		[SCLSR]		= { 0x28, 16 },
327 		[HSSRR]		= sci_reg_invalid,
328 	},
329 
330 	/*
331 	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
332 	 * registers.
333 	 */
334 	[SCIx_SH7705_SCIF_REGTYPE] = {
335 		[SCSMR]		= { 0x00, 16 },
336 		[SCBRR]		= { 0x04,  8 },
337 		[SCSCR]		= { 0x08, 16 },
338 		[SCxTDR]	= { 0x20,  8 },
339 		[SCxSR]		= { 0x14, 16 },
340 		[SCxRDR]	= { 0x24,  8 },
341 		[SCFCR]		= { 0x18, 16 },
342 		[SCFDR]		= { 0x1c, 16 },
343 		[SCTFDR]	= sci_reg_invalid,
344 		[SCRFDR]	= sci_reg_invalid,
345 		[SCSPTR]	= sci_reg_invalid,
346 		[SCLSR]		= sci_reg_invalid,
347 		[HSSRR]		= sci_reg_invalid,
348 	},
349 };
350 
351 #define sci_getreg(up, offset)		(sci_regmap[to_sci_port(up)->cfg->regtype] + (offset))
352 
353 /*
354  * The "offset" here is rather misleading, in that it refers to an enum
355  * value relative to the port mapping rather than the fixed offset
356  * itself, which needs to be manually retrieved from the platform's
357  * register map for the given port.
358  */
359 static unsigned int sci_serial_in(struct uart_port *p, int offset)
360 {
361 	struct plat_sci_reg *reg = sci_getreg(p, offset);
362 
363 	if (reg->size == 8)
364 		return ioread8(p->membase + (reg->offset << p->regshift));
365 	else if (reg->size == 16)
366 		return ioread16(p->membase + (reg->offset << p->regshift));
367 	else
368 		WARN(1, "Invalid register access\n");
369 
370 	return 0;
371 }
372 
373 static void sci_serial_out(struct uart_port *p, int offset, int value)
374 {
375 	struct plat_sci_reg *reg = sci_getreg(p, offset);
376 
377 	if (reg->size == 8)
378 		iowrite8(value, p->membase + (reg->offset << p->regshift));
379 	else if (reg->size == 16)
380 		iowrite16(value, p->membase + (reg->offset << p->regshift));
381 	else
382 		WARN(1, "Invalid register access\n");
383 }
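/*
 * Illustrative example (values taken from the SCIx_SH4_SCIF_REGTYPE map
 * above): SCxSR is described there as { 0x10, 16 }, so sci_serial_in(port,
 * SCxSR) performs a 16-bit read at port->membase + (0x10 << port->regshift).
 * Entries marked sci_reg_invalid have size 0 and fall through to the WARN()
 * path in the accessors above.
 */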
384 
385 static int sci_probe_regmap(struct plat_sci_port *cfg)
386 {
387 	switch (cfg->type) {
388 	case PORT_SCI:
389 		cfg->regtype = SCIx_SCI_REGTYPE;
390 		break;
391 	case PORT_IRDA:
392 		cfg->regtype = SCIx_IRDA_REGTYPE;
393 		break;
394 	case PORT_SCIFA:
395 		cfg->regtype = SCIx_SCIFA_REGTYPE;
396 		break;
397 	case PORT_SCIFB:
398 		cfg->regtype = SCIx_SCIFB_REGTYPE;
399 		break;
400 	case PORT_SCIF:
401 		/*
402 		 * The SH-4 is a bit of a misnomer here, although that's
403 		 * where this particular port layout originated. This
404 		 * configuration (or some slight variation thereof)
405 		 * remains the dominant model for all SCIFs.
406 		 */
407 		cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
408 		break;
409 	case PORT_HSCIF:
410 		cfg->regtype = SCIx_HSCIF_REGTYPE;
411 		break;
412 	default:
413 		printk(KERN_ERR "Can't probe register map for given port\n");
414 		return -EINVAL;
415 	}
416 
417 	return 0;
418 }
419 
420 static void sci_port_enable(struct sci_port *sci_port)
421 {
422 	if (!sci_port->port.dev)
423 		return;
424 
425 	pm_runtime_get_sync(sci_port->port.dev);
426 
427 	clk_prepare_enable(sci_port->iclk);
428 	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
429 	clk_prepare_enable(sci_port->fclk);
430 }
431 
432 static void sci_port_disable(struct sci_port *sci_port)
433 {
434 	if (!sci_port->port.dev)
435 		return;
436 
437 	/* Cancel the break timer to ensure that the timer handler will not try
438 	 * to access the hardware with clocks and power disabled. Reset the
439 	 * break flag to make the break debouncing state machine ready for the
440 	 * next break.
441 	 */
442 	del_timer_sync(&sci_port->break_timer);
443 	sci_port->break_flag = 0;
444 
445 	clk_disable_unprepare(sci_port->fclk);
446 	clk_disable_unprepare(sci_port->iclk);
447 
448 	pm_runtime_put_sync(sci_port->port.dev);
449 }
450 
451 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
452 
453 #ifdef CONFIG_CONSOLE_POLL
454 static int sci_poll_get_char(struct uart_port *port)
455 {
456 	unsigned short status;
457 	int c;
458 
459 	do {
460 		status = serial_port_in(port, SCxSR);
461 		if (status & SCxSR_ERRORS(port)) {
462 			serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
463 			continue;
464 		}
465 		break;
466 	} while (1);
467 
468 	if (!(status & SCxSR_RDxF(port)))
469 		return NO_POLL_CHAR;
470 
471 	c = serial_port_in(port, SCxRDR);
472 
473 	/* Dummy read */
474 	serial_port_in(port, SCxSR);
475 	serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
476 
477 	return c;
478 }
479 #endif
480 
481 static void sci_poll_put_char(struct uart_port *port, unsigned char c)
482 {
483 	unsigned short status;
484 
485 	do {
486 		status = serial_port_in(port, SCxSR);
487 	} while (!(status & SCxSR_TDxE(port)));
488 
489 	serial_port_out(port, SCxTDR, c);
490 	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
491 }
492 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
493 
494 static void sci_init_pins(struct uart_port *port, unsigned int cflag)
495 {
496 	struct sci_port *s = to_sci_port(port);
497 	struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
498 
499 	/*
500 	 * Use port-specific handler if provided.
501 	 */
502 	if (s->cfg->ops && s->cfg->ops->init_pins) {
503 		s->cfg->ops->init_pins(port, cflag);
504 		return;
505 	}
506 
507 	/*
508 	 * For the generic path SCSPTR is necessary. Bail out if that's
509 	 * unavailable, too.
510 	 */
511 	if (!reg->size)
512 		return;
513 
514 	if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
515 	    !(cflag & CRTSCTS)) {
516 		unsigned short status;
517 
518 		status = serial_port_in(port, SCSPTR);
519 		status &= ~SCSPTR_CTSIO;
520 		status |= SCSPTR_RTSIO;
521 		serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
522 	}
523 }
524 
525 static int sci_txfill(struct uart_port *port)
526 {
527 	struct plat_sci_reg *reg;
528 
529 	reg = sci_getreg(port, SCTFDR);
530 	if (reg->size)
531 		return serial_port_in(port, SCTFDR) & ((port->fifosize << 1) - 1);
532 
533 	reg = sci_getreg(port, SCFDR);
534 	if (reg->size)
535 		return serial_port_in(port, SCFDR) >> 8;
536 
537 	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
538 }
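/*
 * Note on the (port->fifosize << 1) - 1 mask used above (and in
 * sci_rxfill() below): for a 16-byte FIFO it evaluates to 0x1f, which is
 * wide enough to hold every legal count from 0 up to and including 16
 * (a completely full FIFO).
 */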
539 
540 static int sci_txroom(struct uart_port *port)
541 {
542 	return port->fifosize - sci_txfill(port);
543 }
544 
545 static int sci_rxfill(struct uart_port *port)
546 {
547 	struct plat_sci_reg *reg;
548 
549 	reg = sci_getreg(port, SCRFDR);
550 	if (reg->size)
551 		return serial_port_in(port, SCRFDR) & ((port->fifosize << 1) - 1);
552 
553 	reg = sci_getreg(port, SCFDR);
554 	if (reg->size)
555 		return serial_port_in(port, SCFDR) & ((port->fifosize << 1) - 1);
556 
557 	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
558 }
559 
560 /*
561  * SCI helper for checking the state of the muxed port/RXD pins.
562  */
563 static inline int sci_rxd_in(struct uart_port *port)
564 {
565 	struct sci_port *s = to_sci_port(port);
566 
567 	if (s->cfg->port_reg <= 0)
568 		return 1;
569 
570 	/* Cast for ARM damage */
571 	return !!__raw_readb((void __iomem *)(uintptr_t)s->cfg->port_reg);
572 }
573 
574 /* ********************************************************************** *
575  *                   the interrupt related routines                       *
576  * ********************************************************************** */
577 
578 static void sci_transmit_chars(struct uart_port *port)
579 {
580 	struct circ_buf *xmit = &port->state->xmit;
581 	unsigned int stopped = uart_tx_stopped(port);
582 	unsigned short status;
583 	unsigned short ctrl;
584 	int count;
585 
586 	status = serial_port_in(port, SCxSR);
587 	if (!(status & SCxSR_TDxE(port))) {
588 		ctrl = serial_port_in(port, SCSCR);
589 		if (uart_circ_empty(xmit))
590 			ctrl &= ~SCSCR_TIE;
591 		else
592 			ctrl |= SCSCR_TIE;
593 		serial_port_out(port, SCSCR, ctrl);
594 		return;
595 	}
596 
597 	count = sci_txroom(port);
598 
599 	do {
600 		unsigned char c;
601 
602 		if (port->x_char) {
603 			c = port->x_char;
604 			port->x_char = 0;
605 		} else if (!uart_circ_empty(xmit) && !stopped) {
606 			c = xmit->buf[xmit->tail];
607 			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
608 		} else {
609 			break;
610 		}
611 
612 		serial_port_out(port, SCxTDR, c);
613 
614 		port->icount.tx++;
615 	} while (--count > 0);
616 
617 	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
618 
619 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
620 		uart_write_wakeup(port);
621 	if (uart_circ_empty(xmit)) {
622 		sci_stop_tx(port);
623 	} else {
624 		ctrl = serial_port_in(port, SCSCR);
625 
626 		if (port->type != PORT_SCI) {
627 			serial_port_in(port, SCxSR); /* Dummy read */
628 			serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
629 		}
630 
631 		ctrl |= SCSCR_TIE;
632 		serial_port_out(port, SCSCR, ctrl);
633 	}
634 }
635 
636 /* On SH3, SCIF may read end-of-break as a space->mark char */
637 #define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })
638 
639 static void sci_receive_chars(struct uart_port *port)
640 {
641 	struct sci_port *sci_port = to_sci_port(port);
642 	struct tty_port *tport = &port->state->port;
643 	int i, count, copied = 0;
644 	unsigned short status;
645 	unsigned char flag;
646 
647 	status = serial_port_in(port, SCxSR);
648 	if (!(status & SCxSR_RDxF(port)))
649 		return;
650 
651 	while (1) {
652 		/* Don't copy more bytes than there is room for in the buffer */
653 		count = tty_buffer_request_room(tport, sci_rxfill(port));
654 
655 		/* If for any reason we can't copy more data, we're done! */
656 		if (count == 0)
657 			break;
658 
659 		if (port->type == PORT_SCI) {
660 			char c = serial_port_in(port, SCxRDR);
661 			if (uart_handle_sysrq_char(port, c) ||
662 			    sci_port->break_flag)
663 				count = 0;
664 			else
665 				tty_insert_flip_char(tport, c, TTY_NORMAL);
666 		} else {
667 			for (i = 0; i < count; i++) {
668 				char c = serial_port_in(port, SCxRDR);
669 
670 				status = serial_port_in(port, SCxSR);
671 #if defined(CONFIG_CPU_SH3)
672 				/* Skip "chars" during break */
673 				if (sci_port->break_flag) {
674 					if ((c == 0) &&
675 					    (status & SCxSR_FER(port))) {
676 						count--; i--;
677 						continue;
678 					}
679 
680 					/* Nonzero => end-of-break */
681 					dev_dbg(port->dev, "debounce<%02x>\n", c);
682 					sci_port->break_flag = 0;
683 
684 					if (STEPFN(c)) {
685 						count--; i--;
686 						continue;
687 					}
688 				}
689 #endif /* CONFIG_CPU_SH3 */
690 				if (uart_handle_sysrq_char(port, c)) {
691 					count--; i--;
692 					continue;
693 				}
694 
695 				/* Store data and status */
696 				if (status & SCxSR_FER(port)) {
697 					flag = TTY_FRAME;
698 					port->icount.frame++;
699 					dev_notice(port->dev, "frame error\n");
700 				} else if (status & SCxSR_PER(port)) {
701 					flag = TTY_PARITY;
702 					port->icount.parity++;
703 					dev_notice(port->dev, "parity error\n");
704 				} else
705 					flag = TTY_NORMAL;
706 
707 				tty_insert_flip_char(tport, c, flag);
708 			}
709 		}
710 
711 		serial_port_in(port, SCxSR); /* dummy read */
712 		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
713 
714 		copied += count;
715 		port->icount.rx += count;
716 	}
717 
718 	if (copied) {
719 		/* Tell the rest of the system the news. New characters! */
720 		tty_flip_buffer_push(tport);
721 	} else {
722 		serial_port_in(port, SCxSR); /* dummy read */
723 		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
724 	}
725 }
726 
727 #define SCI_BREAK_JIFFIES (HZ/20)
728 
729 /*
730  * The sci generates interrupts during the break,
731  * 1 per millisecond or so during the break period, for 9600 baud.
732  * So don't bother disabling interrupts.
733  * But we don't want more than one break event.
734  * Use a kernel timer to periodically poll the rx line until
735  * the break is finished.
736  */
737 static inline void sci_schedule_break_timer(struct sci_port *port)
738 {
739 	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
740 }
741 
742 /* Ensure that two consecutive samples find the break over. */
743 static void sci_break_timer(unsigned long data)
744 {
745 	struct sci_port *port = (struct sci_port *)data;
746 
747 	if (sci_rxd_in(&port->port) == 0) {
748 		port->break_flag = 1;
749 		sci_schedule_break_timer(port);
750 	} else if (port->break_flag == 1) {
751 		/* break is over. */
752 		port->break_flag = 2;
753 		sci_schedule_break_timer(port);
754 	} else
755 		port->break_flag = 0;
756 }
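/*
 * Reading the timer handler above, break_flag effectively acts as a tiny
 * state machine: 0 = no break in progress, 1 = RXD still low (break
 * active), 2 = RXD seen high once; a second consecutive high sample
 * returns the state to 0 and the break is considered over.
 */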
757 
758 static int sci_handle_errors(struct uart_port *port)
759 {
760 	int copied = 0;
761 	unsigned short status = serial_port_in(port, SCxSR);
762 	struct tty_port *tport = &port->state->port;
763 	struct sci_port *s = to_sci_port(port);
764 
765 	/* Handle overruns */
766 	if (status & (1 << s->overrun_bit)) {
767 		port->icount.overrun++;
768 
769 		/* overrun error */
770 		if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
771 			copied++;
772 
773 		dev_notice(port->dev, "overrun error");
774 	}
775 
776 	if (status & SCxSR_FER(port)) {
777 		if (sci_rxd_in(port) == 0) {
778 			/* Notify of BREAK */
779 			struct sci_port *sci_port = to_sci_port(port);
780 
781 			if (!sci_port->break_flag) {
782 				port->icount.brk++;
783 
784 				sci_port->break_flag = 1;
785 				sci_schedule_break_timer(sci_port);
786 
787 				/* Do sysrq handling. */
788 				if (uart_handle_break(port))
789 					return 0;
790 
791 				dev_dbg(port->dev, "BREAK detected\n");
792 
793 				if (tty_insert_flip_char(tport, 0, TTY_BREAK))
794 					copied++;
795 			}
796 
797 		} else {
798 			/* frame error */
799 			port->icount.frame++;
800 
801 			if (tty_insert_flip_char(tport, 0, TTY_FRAME))
802 				copied++;
803 
804 			dev_notice(port->dev, "frame error\n");
805 		}
806 	}
807 
808 	if (status & SCxSR_PER(port)) {
809 		/* parity error */
810 		port->icount.parity++;
811 
812 		if (tty_insert_flip_char(tport, 0, TTY_PARITY))
813 			copied++;
814 
815 		dev_notice(port->dev, "parity error");
816 	}
817 
818 	if (copied)
819 		tty_flip_buffer_push(tport);
820 
821 	return copied;
822 }
823 
824 static int sci_handle_fifo_overrun(struct uart_port *port)
825 {
826 	struct tty_port *tport = &port->state->port;
827 	struct sci_port *s = to_sci_port(port);
828 	struct plat_sci_reg *reg;
829 	int copied = 0;
830 
831 	reg = sci_getreg(port, SCLSR);
832 	if (!reg->size)
833 		return 0;
834 
835 	if ((serial_port_in(port, SCLSR) & (1 << s->overrun_bit))) {
836 		serial_port_out(port, SCLSR, 0);
837 
838 		port->icount.overrun++;
839 
840 		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
841 		tty_flip_buffer_push(tport);
842 
843 		dev_notice(port->dev, "overrun error\n");
844 		copied++;
845 	}
846 
847 	return copied;
848 }
849 
850 static int sci_handle_breaks(struct uart_port *port)
851 {
852 	int copied = 0;
853 	unsigned short status = serial_port_in(port, SCxSR);
854 	struct tty_port *tport = &port->state->port;
855 	struct sci_port *s = to_sci_port(port);
856 
857 	if (uart_handle_break(port))
858 		return 0;
859 
860 	if (!s->break_flag && status & SCxSR_BRK(port)) {
861 #if defined(CONFIG_CPU_SH3)
862 		/* Debounce break */
863 		s->break_flag = 1;
864 #endif
865 
866 		port->icount.brk++;
867 
868 		/* Notify of BREAK */
869 		if (tty_insert_flip_char(tport, 0, TTY_BREAK))
870 			copied++;
871 
872 		dev_dbg(port->dev, "BREAK detected\n");
873 	}
874 
875 	if (copied)
876 		tty_flip_buffer_push(tport);
877 
878 	copied += sci_handle_fifo_overrun(port);
879 
880 	return copied;
881 }
882 
883 static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
884 {
885 #ifdef CONFIG_SERIAL_SH_SCI_DMA
886 	struct uart_port *port = ptr;
887 	struct sci_port *s = to_sci_port(port);
888 
889 	if (s->chan_rx) {
890 		u16 scr = serial_port_in(port, SCSCR);
891 		u16 ssr = serial_port_in(port, SCxSR);
892 
893 		/* Disable future Rx interrupts */
894 		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
895 			disable_irq_nosync(irq);
896 			scr |= 0x4000;
897 		} else {
898 			scr &= ~SCSCR_RIE;
899 		}
900 		serial_port_out(port, SCSCR, scr);
901 		/* Clear current interrupt */
902 		serial_port_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
903 		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
904 			jiffies, s->rx_timeout);
905 		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
906 
907 		return IRQ_HANDLED;
908 	}
909 #endif
910 
911 	/* I think sci_receive_chars() has to be called irrespective
912 	 * of whether I_IXOFF is set; otherwise, how would the interrupt
913 	 * be disabled?
914 	 */
915 	sci_receive_chars(ptr);
916 
917 	return IRQ_HANDLED;
918 }
919 
920 static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
921 {
922 	struct uart_port *port = ptr;
923 	unsigned long flags;
924 
925 	spin_lock_irqsave(&port->lock, flags);
926 	sci_transmit_chars(port);
927 	spin_unlock_irqrestore(&port->lock, flags);
928 
929 	return IRQ_HANDLED;
930 }
931 
932 static irqreturn_t sci_er_interrupt(int irq, void *ptr)
933 {
934 	struct uart_port *port = ptr;
935 
936 	/* Handle errors */
937 	if (port->type == PORT_SCI) {
938 		if (sci_handle_errors(port)) {
939 			/* discard character in rx buffer */
940 			serial_port_in(port, SCxSR);
941 			serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
942 		}
943 	} else {
944 		sci_handle_fifo_overrun(port);
945 		sci_rx_interrupt(irq, ptr);
946 	}
947 
948 	serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
949 
950 	/* Kick the transmission */
951 	sci_tx_interrupt(irq, ptr);
952 
953 	return IRQ_HANDLED;
954 }
955 
956 static irqreturn_t sci_br_interrupt(int irq, void *ptr)
957 {
958 	struct uart_port *port = ptr;
959 
960 	/* Handle BREAKs */
961 	sci_handle_breaks(port);
962 	serial_port_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
963 
964 	return IRQ_HANDLED;
965 }
966 
967 static inline unsigned long port_rx_irq_mask(struct uart_port *port)
968 {
969 	/*
970 	 * Not all ports (such as SCIFA) will support REIE. Rather than
971 	 * special-casing the port type, we check the port initialization
972 	 * IRQ enable mask to see whether the IRQ is desired at all. If
973 	 * it's unset, it's logically inferred that there's no point in
974 	 * testing for it.
975 	 */
976 	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
977 }
978 
979 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
980 {
981 	unsigned short ssr_status, scr_status, err_enabled;
982 	struct uart_port *port = ptr;
983 	struct sci_port *s = to_sci_port(port);
984 	irqreturn_t ret = IRQ_NONE;
985 
986 	ssr_status = serial_port_in(port, SCxSR);
987 	scr_status = serial_port_in(port, SCSCR);
988 	err_enabled = scr_status & port_rx_irq_mask(port);
989 
990 	/* Tx Interrupt */
991 	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
992 	    !s->chan_tx)
993 		ret = sci_tx_interrupt(irq, ptr);
994 
995 	/*
996 	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
997 	 * DR flags
998 	 */
999 	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
1000 	    (scr_status & SCSCR_RIE))
1001 		ret = sci_rx_interrupt(irq, ptr);
1002 
1003 	/* Error Interrupt */
1004 	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
1005 		ret = sci_er_interrupt(irq, ptr);
1006 
1007 	/* Break Interrupt */
1008 	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
1009 		ret = sci_br_interrupt(irq, ptr);
1010 
1011 	return ret;
1012 }
1013 
1014 /*
1015  * Here we define a transition notifier so that we can update all of our
1016  * ports' baud rate when the peripheral clock changes.
1017  */
1018 static int sci_notifier(struct notifier_block *self,
1019 			unsigned long phase, void *p)
1020 {
1021 	struct sci_port *sci_port;
1022 	unsigned long flags;
1023 
1024 	sci_port = container_of(self, struct sci_port, freq_transition);
1025 
1026 	if ((phase == CPUFREQ_POSTCHANGE) ||
1027 	    (phase == CPUFREQ_RESUMECHANGE)) {
1028 		struct uart_port *port = &sci_port->port;
1029 
1030 		spin_lock_irqsave(&port->lock, flags);
1031 		port->uartclk = clk_get_rate(sci_port->iclk);
1032 		spin_unlock_irqrestore(&port->lock, flags);
1033 	}
1034 
1035 	return NOTIFY_OK;
1036 }
1037 
1038 static struct sci_irq_desc {
1039 	const char	*desc;
1040 	irq_handler_t	handler;
1041 } sci_irq_desc[] = {
1042 	/*
1043 	 * Split out handlers, the default case.
1044 	 */
1045 	[SCIx_ERI_IRQ] = {
1046 		.desc = "rx err",
1047 		.handler = sci_er_interrupt,
1048 	},
1049 
1050 	[SCIx_RXI_IRQ] = {
1051 		.desc = "rx full",
1052 		.handler = sci_rx_interrupt,
1053 	},
1054 
1055 	[SCIx_TXI_IRQ] = {
1056 		.desc = "tx empty",
1057 		.handler = sci_tx_interrupt,
1058 	},
1059 
1060 	[SCIx_BRI_IRQ] = {
1061 		.desc = "break",
1062 		.handler = sci_br_interrupt,
1063 	},
1064 
1065 	/*
1066 	 * Special muxed handler.
1067 	 */
1068 	[SCIx_MUX_IRQ] = {
1069 		.desc = "mux",
1070 		.handler = sci_mpxed_interrupt,
1071 	},
1072 };
1073 
1074 static int sci_request_irq(struct sci_port *port)
1075 {
1076 	struct uart_port *up = &port->port;
1077 	int i, j, ret = 0;
1078 
1079 	for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
1080 		struct sci_irq_desc *desc;
1081 		int irq;
1082 
1083 		if (SCIx_IRQ_IS_MUXED(port)) {
1084 			i = SCIx_MUX_IRQ;
1085 			irq = up->irq;
1086 		} else {
1087 			irq = port->irqs[i];
1088 
1089 			/*
1090 			 * Certain port types won't support all of the
1091 			 * available interrupt sources.
1092 			 */
1093 			if (unlikely(irq < 0))
1094 				continue;
1095 		}
1096 
1097 		desc = sci_irq_desc + i;
1098 		port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
1099 					    dev_name(up->dev), desc->desc);
1100 		if (!port->irqstr[j]) {
1101 			dev_err(up->dev, "Failed to allocate %s IRQ string\n",
1102 				desc->desc);
1103 			goto out_nomem;
1104 		}
1105 
1106 		ret = request_irq(irq, desc->handler, up->irqflags,
1107 				  port->irqstr[j], port);
1108 		if (unlikely(ret)) {
1109 			dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
1110 			goto out_noirq;
1111 		}
1112 	}
1113 
1114 	return 0;
1115 
1116 out_noirq:
1117 	while (--i >= 0)
1118 		free_irq(port->irqs[i], port);
1119 
1120 out_nomem:
1121 	while (--j >= 0)
1122 		kfree(port->irqstr[j]);
1123 
1124 	return ret;
1125 }
1126 
1127 static void sci_free_irq(struct sci_port *port)
1128 {
1129 	int i;
1130 
1131 	/*
1132 	 * Intentionally in reverse order so we iterate over the muxed
1133 	 * IRQ first.
1134 	 */
1135 	for (i = 0; i < SCIx_NR_IRQS; i++) {
1136 		int irq = port->irqs[i];
1137 
1138 		/*
1139 		 * Certain port types won't support all of the available
1140 		 * interrupt sources.
1141 		 */
1142 		if (unlikely(irq < 0))
1143 			continue;
1144 
1145 		free_irq(port->irqs[i], port);
1146 		kfree(port->irqstr[i]);
1147 
1148 		if (SCIx_IRQ_IS_MUXED(port)) {
1149 			/* If there's only one IRQ, we're done. */
1150 			return;
1151 		}
1152 	}
1153 }
1154 
1155 static unsigned int sci_tx_empty(struct uart_port *port)
1156 {
1157 	unsigned short status = serial_port_in(port, SCxSR);
1158 	unsigned short in_tx_fifo = sci_txfill(port);
1159 
1160 	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
1161 }
1162 
1163 /*
1164  * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
1165  * CTS/RTS is supported in hardware by at least one port and controlled
1166  * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
1167  * handled via the ->init_pins() op, which is a bit of a one-way street,
1168  * lacking any ability to defer pin control -- this will later be
1169  * converted over to the GPIO framework).
1170  *
1171  * Other modes (such as loopback) are supported generically on certain
1172  * port types, but not others. For these it's sufficient to test for the
1173  * existence of the support register and simply ignore the port type.
1174  */
1175 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
1176 {
1177 	if (mctrl & TIOCM_LOOP) {
1178 		struct plat_sci_reg *reg;
1179 
1180 		/*
1181 		 * Standard loopback mode for SCFCR ports.
1182 		 */
1183 		reg = sci_getreg(port, SCFCR);
1184 		if (reg->size)
1185 			serial_port_out(port, SCFCR, serial_port_in(port, SCFCR) | 1);
1186 	}
1187 }
1188 
1189 static unsigned int sci_get_mctrl(struct uart_port *port)
1190 {
1191 	/*
1192 	 * CTS/RTS is handled in hardware when supported, while nothing
1193 	 * else is wired up. Keep it simple and simply assert DSR/CAR.
1194 	 */
1195 	return TIOCM_DSR | TIOCM_CAR;
1196 }
1197 
1198 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1199 static void sci_dma_tx_complete(void *arg)
1200 {
1201 	struct sci_port *s = arg;
1202 	struct uart_port *port = &s->port;
1203 	struct circ_buf *xmit = &port->state->xmit;
1204 	unsigned long flags;
1205 
1206 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1207 
1208 	spin_lock_irqsave(&port->lock, flags);
1209 
1210 	xmit->tail += sg_dma_len(&s->sg_tx);
1211 	xmit->tail &= UART_XMIT_SIZE - 1;
1212 
1213 	port->icount.tx += sg_dma_len(&s->sg_tx);
1214 
1215 	async_tx_ack(s->desc_tx);
1216 	s->desc_tx = NULL;
1217 
1218 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1219 		uart_write_wakeup(port);
1220 
1221 	if (!uart_circ_empty(xmit)) {
1222 		s->cookie_tx = 0;
1223 		schedule_work(&s->work_tx);
1224 	} else {
1225 		s->cookie_tx = -EINVAL;
1226 		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1227 			u16 ctrl = serial_port_in(port, SCSCR);
1228 			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
1229 		}
1230 	}
1231 
1232 	spin_unlock_irqrestore(&port->lock, flags);
1233 }
1234 
1235 /* Locking: called with port lock held */
1236 static int sci_dma_rx_push(struct sci_port *s, size_t count)
1237 {
1238 	struct uart_port *port = &s->port;
1239 	struct tty_port *tport = &port->state->port;
1240 	int i, active, room;
1241 
1242 	room = tty_buffer_request_room(tport, count);
1243 
1244 	if (s->active_rx == s->cookie_rx[0]) {
1245 		active = 0;
1246 	} else if (s->active_rx == s->cookie_rx[1]) {
1247 		active = 1;
1248 	} else {
1249 		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1250 		return 0;
1251 	}
1252 
1253 	if (room < count)
1254 		dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
1255 			 count - room);
1256 	if (!room)
1257 		return room;
1258 
1259 	for (i = 0; i < room; i++)
1260 		tty_insert_flip_char(tport, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
1261 				     TTY_NORMAL);
1262 
1263 	port->icount.rx += room;
1264 
1265 	return room;
1266 }
1267 
1268 static void sci_dma_rx_complete(void *arg)
1269 {
1270 	struct sci_port *s = arg;
1271 	struct uart_port *port = &s->port;
1272 	unsigned long flags;
1273 	int count;
1274 
1275 	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
1276 
1277 	spin_lock_irqsave(&port->lock, flags);
1278 
1279 	count = sci_dma_rx_push(s, s->buf_len_rx);
1280 
1281 	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
1282 
1283 	spin_unlock_irqrestore(&port->lock, flags);
1284 
1285 	if (count)
1286 		tty_flip_buffer_push(&port->state->port);
1287 
1288 	schedule_work(&s->work_rx);
1289 }
1290 
1291 static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
1292 {
1293 	struct dma_chan *chan = s->chan_rx;
1294 	struct uart_port *port = &s->port;
1295 
1296 	s->chan_rx = NULL;
1297 	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
1298 	dma_release_channel(chan);
1299 	if (sg_dma_address(&s->sg_rx[0]))
1300 		dma_free_coherent(port->dev, s->buf_len_rx * 2,
1301 				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
1302 	if (enable_pio)
1303 		sci_start_rx(port);
1304 }
1305 
1306 static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
1307 {
1308 	struct dma_chan *chan = s->chan_tx;
1309 	struct uart_port *port = &s->port;
1310 
1311 	s->chan_tx = NULL;
1312 	s->cookie_tx = -EINVAL;
1313 	dma_release_channel(chan);
1314 	if (enable_pio)
1315 		sci_start_tx(port);
1316 }
1317 
1318 static void sci_submit_rx(struct sci_port *s)
1319 {
1320 	struct dma_chan *chan = s->chan_rx;
1321 	int i;
1322 
1323 	for (i = 0; i < 2; i++) {
1324 		struct scatterlist *sg = &s->sg_rx[i];
1325 		struct dma_async_tx_descriptor *desc;
1326 
1327 		desc = dmaengine_prep_slave_sg(chan,
1328 			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
1329 
1330 		if (desc) {
1331 			s->desc_rx[i] = desc;
1332 			desc->callback = sci_dma_rx_complete;
1333 			desc->callback_param = s;
1334 			s->cookie_rx[i] = desc->tx_submit(desc);
1335 		}
1336 
1337 		if (!desc || s->cookie_rx[i] < 0) {
1338 			if (i) {
1339 				async_tx_ack(s->desc_rx[0]);
1340 				s->cookie_rx[0] = -EINVAL;
1341 			}
1342 			if (desc) {
1343 				async_tx_ack(desc);
1344 				s->cookie_rx[i] = -EINVAL;
1345 			}
1346 			dev_warn(s->port.dev,
1347 				 "failed to re-start DMA, using PIO\n");
1348 			sci_rx_dma_release(s, true);
1349 			return;
1350 		}
1351 		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
1352 			s->cookie_rx[i], i);
1353 	}
1354 
1355 	s->active_rx = s->cookie_rx[0];
1356 
1357 	dma_async_issue_pending(chan);
1358 }
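/*
 * The two Rx descriptors above are used in a ping-pong fashion:
 * sci_submit_rx() queues both halves of the coherent buffer, active_rx
 * tracks the cookie currently expected to complete, and work_fn_rx()
 * below resubmits the finished half and flips active_rx to the other one.
 */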
1359 
1360 static void work_fn_rx(struct work_struct *work)
1361 {
1362 	struct sci_port *s = container_of(work, struct sci_port, work_rx);
1363 	struct uart_port *port = &s->port;
1364 	struct dma_async_tx_descriptor *desc;
1365 	int new;
1366 
1367 	if (s->active_rx == s->cookie_rx[0]) {
1368 		new = 0;
1369 	} else if (s->active_rx == s->cookie_rx[1]) {
1370 		new = 1;
1371 	} else {
1372 		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1373 		return;
1374 	}
1375 	desc = s->desc_rx[new];
1376 
1377 	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
1378 	    DMA_COMPLETE) {
1379 		/* Handle incomplete DMA receive */
1380 		struct dma_chan *chan = s->chan_rx;
1381 		struct shdma_desc *sh_desc = container_of(desc,
1382 					struct shdma_desc, async_tx);
1383 		unsigned long flags;
1384 		int count;
1385 
1386 		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
1387 		dev_dbg(port->dev, "Read %zu bytes with cookie %d\n",
1388 			sh_desc->partial, sh_desc->cookie);
1389 
1390 		spin_lock_irqsave(&port->lock, flags);
1391 		count = sci_dma_rx_push(s, sh_desc->partial);
1392 		spin_unlock_irqrestore(&port->lock, flags);
1393 
1394 		if (count)
1395 			tty_flip_buffer_push(&port->state->port);
1396 
1397 		sci_submit_rx(s);
1398 
1399 		return;
1400 	}
1401 
1402 	s->cookie_rx[new] = desc->tx_submit(desc);
1403 	if (s->cookie_rx[new] < 0) {
1404 		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1405 		sci_rx_dma_release(s, true);
1406 		return;
1407 	}
1408 
1409 	s->active_rx = s->cookie_rx[!new];
1410 
1411 	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
1412 		s->cookie_rx[new], new, s->active_rx);
1413 }
1414 
1415 static void work_fn_tx(struct work_struct *work)
1416 {
1417 	struct sci_port *s = container_of(work, struct sci_port, work_tx);
1418 	struct dma_async_tx_descriptor *desc;
1419 	struct dma_chan *chan = s->chan_tx;
1420 	struct uart_port *port = &s->port;
1421 	struct circ_buf *xmit = &port->state->xmit;
1422 	struct scatterlist *sg = &s->sg_tx;
1423 
1424 	/*
1425 	 * DMA is idle now.
1426 	 * Port xmit buffer is already mapped, and it is one page... Just adjust
1427 	 * offsets and lengths. Since it is a circular buffer, we have to
1428 	 * transmit till the end, and then the rest. Take the port lock to get a
1429 	 * consistent xmit buffer state.
1430 	 */
1431 	spin_lock_irq(&port->lock);
1432 	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
1433 	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
1434 		sg->offset;
1435 	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
1436 		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
1437 	spin_unlock_irq(&port->lock);
1438 
1439 	BUG_ON(!sg_dma_len(sg));
1440 
1441 	desc = dmaengine_prep_slave_sg(chan,
1442 			sg, s->sg_len_tx, DMA_MEM_TO_DEV,
1443 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1444 	if (!desc) {
1445 		/* switch to PIO */
1446 		sci_tx_dma_release(s, true);
1447 		return;
1448 	}
1449 
1450 	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
1451 
1452 	spin_lock_irq(&port->lock);
1453 	s->desc_tx = desc;
1454 	desc->callback = sci_dma_tx_complete;
1455 	desc->callback_param = s;
1456 	spin_unlock_irq(&port->lock);
1457 	s->cookie_tx = desc->tx_submit(desc);
1458 	if (s->cookie_tx < 0) {
1459 		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
1460 		/* switch to PIO */
1461 		sci_tx_dma_release(s, true);
1462 		return;
1463 	}
1464 
1465 	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
1466 		xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
1467 
1468 	dma_async_issue_pending(chan);
1469 }
1470 #endif
1471 
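/*
 * Note: the bare 0x8000 / 0x4000 values toggled in the SCIFA/SCIFB paths
 * below appear to be the SCSCR TDRQE / RDRQE DMA transfer request enable
 * bits of those variants (an assumption based on how they are used here;
 * they are not given symbolic names in this file).
 */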
1472 static void sci_start_tx(struct uart_port *port)
1473 {
1474 	struct sci_port *s = to_sci_port(port);
1475 	unsigned short ctrl;
1476 
1477 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1478 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1479 		u16 new, scr = serial_port_in(port, SCSCR);
1480 		if (s->chan_tx)
1481 			new = scr | 0x8000;
1482 		else
1483 			new = scr & ~0x8000;
1484 		if (new != scr)
1485 			serial_port_out(port, SCSCR, new);
1486 	}
1487 
1488 	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
1489 	    s->cookie_tx < 0) {
1490 		s->cookie_tx = 0;
1491 		schedule_work(&s->work_tx);
1492 	}
1493 #endif
1494 
1495 	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1496 		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1497 		ctrl = serial_port_in(port, SCSCR);
1498 		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
1499 	}
1500 }
1501 
1502 static void sci_stop_tx(struct uart_port *port)
1503 {
1504 	unsigned short ctrl;
1505 
1506 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1507 	ctrl = serial_port_in(port, SCSCR);
1508 
1509 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1510 		ctrl &= ~0x8000;
1511 
1512 	ctrl &= ~SCSCR_TIE;
1513 
1514 	serial_port_out(port, SCSCR, ctrl);
1515 }
1516 
1517 static void sci_start_rx(struct uart_port *port)
1518 {
1519 	unsigned short ctrl;
1520 
1521 	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
1522 
1523 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1524 		ctrl &= ~0x4000;
1525 
1526 	serial_port_out(port, SCSCR, ctrl);
1527 }
1528 
1529 static void sci_stop_rx(struct uart_port *port)
1530 {
1531 	unsigned short ctrl;
1532 
1533 	ctrl = serial_port_in(port, SCSCR);
1534 
1535 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1536 		ctrl &= ~0x4000;
1537 
1538 	ctrl &= ~port_rx_irq_mask(port);
1539 
1540 	serial_port_out(port, SCSCR, ctrl);
1541 }
1542 
1543 static void sci_enable_ms(struct uart_port *port)
1544 {
1545 	/*
1546 	 * Not supported by hardware, always a nop.
1547 	 */
1548 }
1549 
1550 static void sci_break_ctl(struct uart_port *port, int break_state)
1551 {
1552 	struct sci_port *s = to_sci_port(port);
1553 	struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
1554 	unsigned short scscr, scsptr;
1555 
1556 	/* Check whether the port has SCSPTR */
1557 	if (!reg->size) {
1558 		/*
1559 		 * Not supported by hardware. Most parts couple break and rx
1560 		 * interrupts together, with break detection always enabled.
1561 		 */
1562 		return;
1563 	}
1564 
1565 	scsptr = serial_port_in(port, SCSPTR);
1566 	scscr = serial_port_in(port, SCSCR);
1567 
1568 	if (break_state == -1) {
1569 		scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
1570 		scscr &= ~SCSCR_TE;
1571 	} else {
1572 		scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
1573 		scscr |= SCSCR_TE;
1574 	}
1575 
1576 	serial_port_out(port, SCSPTR, scsptr);
1577 	serial_port_out(port, SCSCR, scscr);
1578 }
1579 
1580 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1581 static bool filter(struct dma_chan *chan, void *slave)
1582 {
1583 	struct sh_dmae_slave *param = slave;
1584 
1585 	dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1586 		param->shdma_slave.slave_id);
1587 
1588 	chan->private = &param->shdma_slave;
1589 	return true;
1590 }
1591 
1592 static void rx_timer_fn(unsigned long arg)
1593 {
1594 	struct sci_port *s = (struct sci_port *)arg;
1595 	struct uart_port *port = &s->port;
1596 	u16 scr = serial_port_in(port, SCSCR);
1597 
1598 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1599 		scr &= ~0x4000;
1600 		enable_irq(s->irqs[SCIx_RXI_IRQ]);
1601 	}
1602 	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
1603 	dev_dbg(port->dev, "DMA Rx timed out\n");
1604 	schedule_work(&s->work_rx);
1605 }
1606 
1607 static void sci_request_dma(struct uart_port *port)
1608 {
1609 	struct sci_port *s = to_sci_port(port);
1610 	struct sh_dmae_slave *param;
1611 	struct dma_chan *chan;
1612 	dma_cap_mask_t mask;
1613 	int nent;
1614 
1615 	dev_dbg(port->dev, "%s: port %d\n", __func__,
1616 		port->line);
1617 
1618 	if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
1619 		return;
1620 
1621 	dma_cap_zero(mask);
1622 	dma_cap_set(DMA_SLAVE, mask);
1623 
1624 	param = &s->param_tx;
1625 
1626 	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
1627 	param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
1628 
1629 	s->cookie_tx = -EINVAL;
1630 	chan = dma_request_channel(mask, filter, param);
1631 	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
1632 	if (chan) {
1633 		s->chan_tx = chan;
1634 		sg_init_table(&s->sg_tx, 1);
1635 		/* UART circular tx buffer is an aligned page. */
1636 		BUG_ON((uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
1637 		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
1638 			    UART_XMIT_SIZE,
1639 			    (uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
1640 		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
1641 		if (!nent)
1642 			sci_tx_dma_release(s, false);
1643 		else
1644 			dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1645 				sg_dma_len(&s->sg_tx), port->state->xmit.buf,
1646 				&sg_dma_address(&s->sg_tx));
1647 
1648 		s->sg_len_tx = nent;
1649 
1650 		INIT_WORK(&s->work_tx, work_fn_tx);
1651 	}
1652 
1653 	param = &s->param_rx;
1654 
1655 	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
1656 	param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
1657 
1658 	chan = dma_request_channel(mask, filter, param);
1659 	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
1660 	if (chan) {
1661 		dma_addr_t dma[2];
1662 		void *buf[2];
1663 		int i;
1664 
1665 		s->chan_rx = chan;
1666 
1667 		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
1668 		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
1669 					    &dma[0], GFP_KERNEL);
1670 
1671 		if (!buf[0]) {
1672 			dev_warn(port->dev,
1673 				 "failed to allocate dma buffer, using PIO\n");
1674 			sci_rx_dma_release(s, true);
1675 			return;
1676 		}
1677 
1678 		buf[1] = buf[0] + s->buf_len_rx;
1679 		dma[1] = dma[0] + s->buf_len_rx;
1680 
1681 		for (i = 0; i < 2; i++) {
1682 			struct scatterlist *sg = &s->sg_rx[i];
1683 
1684 			sg_init_table(sg, 1);
1685 			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
1686 				    (uintptr_t)buf[i] & ~PAGE_MASK);
1687 			sg_dma_address(sg) = dma[i];
1688 		}
1689 
1690 		INIT_WORK(&s->work_rx, work_fn_rx);
1691 		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
1692 
1693 		sci_submit_rx(s);
1694 	}
1695 }
1696 
1697 static void sci_free_dma(struct uart_port *port)
1698 {
1699 	struct sci_port *s = to_sci_port(port);
1700 
1701 	if (s->chan_tx)
1702 		sci_tx_dma_release(s, false);
1703 	if (s->chan_rx)
1704 		sci_rx_dma_release(s, false);
1705 }
1706 #else
1707 static inline void sci_request_dma(struct uart_port *port)
1708 {
1709 }
1710 
1711 static inline void sci_free_dma(struct uart_port *port)
1712 {
1713 }
1714 #endif
1715 
1716 static int sci_startup(struct uart_port *port)
1717 {
1718 	struct sci_port *s = to_sci_port(port);
1719 	unsigned long flags;
1720 	int ret;
1721 
1722 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1723 
1724 	ret = sci_request_irq(s);
1725 	if (unlikely(ret < 0))
1726 		return ret;
1727 
1728 	sci_request_dma(port);
1729 
1730 	spin_lock_irqsave(&port->lock, flags);
1731 	sci_start_tx(port);
1732 	sci_start_rx(port);
1733 	spin_unlock_irqrestore(&port->lock, flags);
1734 
1735 	return 0;
1736 }
1737 
1738 static void sci_shutdown(struct uart_port *port)
1739 {
1740 	struct sci_port *s = to_sci_port(port);
1741 	unsigned long flags;
1742 
1743 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1744 
1745 	spin_lock_irqsave(&port->lock, flags);
1746 	sci_stop_rx(port);
1747 	sci_stop_tx(port);
1748 	spin_unlock_irqrestore(&port->lock, flags);
1749 
1750 	sci_free_dma(port);
1751 	sci_free_irq(s);
1752 }
1753 
1754 static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
1755 				   unsigned long freq)
1756 {
1757 	if (s->sampling_rate)
1758 		return DIV_ROUND_CLOSEST(freq, s->sampling_rate * bps) - 1;
1759 
1760 	/* Warn, but use a safe default */
1761 	WARN_ON(1);
1762 
1763 	return ((freq + 16 * bps) / (32 * bps) - 1);
1764 }
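/*
 * Worked example (illustrative values, not from this file): with a
 * sampling rate of 32, a 14.7456 MHz clock and 115200 bps,
 * 14745600 / (32 * 115200) = 4, so SCBRR is programmed with 4 - 1 = 3.
 */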
1765 
1766 /* calculate sample rate, BRR, and clock select for HSCIF */
1767 static void sci_baud_calc_hscif(unsigned int bps, unsigned long freq,
1768 				int *brr, unsigned int *srr,
1769 				unsigned int *cks)
1770 {
1771 	int sr, c, br, err;
1772 	int min_err = 1000; /* 100% */
1773 
1774 	/* Find the combination of sample rate and clock select with the
1775 	   smallest deviation from the desired baud rate. */
1776 	for (sr = 8; sr <= 32; sr++) {
1777 		for (c = 0; c <= 3; c++) {
1778 			/* integerized formulas from HSCIF documentation */
1779 			br = freq / (sr * (1 << (2 * c + 1)) * bps) - 1;
1780 			if (br < 0 || br > 255)
1781 				continue;
1782 			err = freq / ((br + 1) * bps * sr *
1783 			      (1 << (2 * c + 1)) / 1000) - 1000;
1784 			if (min_err > err) {
1785 				min_err = err;
1786 				*brr = br;
1787 				*srr = sr - 1;
1788 				*cks = c;
1789 			}
1790 		}
1791 	}
1792 
1793 	if (min_err == 1000) {
1794 		WARN_ON(1);
1795 		/* use defaults */
1796 		*brr = 255;
1797 		*srr = 15;
1798 		*cks = 0;
1799 	}
1800 }
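/*
 * The search above is driven by the inverse of the relation used in the
 * loop body: bps ~= freq / (sr * 2^(2 * cks + 1) * (BRR + 1)), with
 * sr in [8..32] and cks in [0..3]; min_err is tracked in tenths of a
 * percent, matching the "100%" == 1000 initial value.
 */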
1801 
1802 static void sci_reset(struct uart_port *port)
1803 {
1804 	struct plat_sci_reg *reg;
1805 	unsigned int status;
1806 
1807 	do {
1808 		status = serial_port_in(port, SCxSR);
1809 	} while (!(status & SCxSR_TEND(port)));
1810 
1811 	serial_port_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
1812 
1813 	reg = sci_getreg(port, SCFCR);
1814 	if (reg->size)
1815 		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
1816 }
1817 
1818 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1819 			    struct ktermios *old)
1820 {
1821 	struct sci_port *s = to_sci_port(port);
1822 	struct plat_sci_reg *reg;
1823 	unsigned int baud, smr_val, max_baud, cks = 0;
1824 	int t = -1;
1825 	unsigned int srr = 15;
1826 
1827 	/*
1828 	 * earlyprintk comes here early on with port->uartclk set to zero.
1829 	 * the clock framework is not up and running at this point so here
1830 	 * we assume that 115200 is the maximum baud rate. please note that
1831 	 * the baud rate is not programmed during earlyprintk - it is assumed
1832 	 * that the previous boot loader has enabled required clocks and
1833 	 * setup the baud rate generator hardware for us already.
1834 	 */
1835 	max_baud = port->uartclk ? port->uartclk / 16 : 115200;
1836 
1837 	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
1838 	if (likely(baud && port->uartclk)) {
1839 		if (s->cfg->type == PORT_HSCIF) {
1840 			sci_baud_calc_hscif(baud, port->uartclk, &t, &srr,
1841 					    &cks);
1842 		} else {
1843 			t = sci_scbrr_calc(s, baud, port->uartclk);
1844 			for (cks = 0; t >= 256 && cks <= 3; cks++)
1845 				t >>= 2;
1846 		}
1847 	}
1848 
1849 	sci_port_enable(s);
1850 
1851 	sci_reset(port);
1852 
1853 	smr_val = serial_port_in(port, SCSMR) & 3;
1854 
1855 	if ((termios->c_cflag & CSIZE) == CS7)
1856 		smr_val |= 0x40;
1857 	if (termios->c_cflag & PARENB)
1858 		smr_val |= 0x20;
1859 	if (termios->c_cflag & PARODD)
1860 		smr_val |= 0x30;
1861 	if (termios->c_cflag & CSTOPB)
1862 		smr_val |= 0x08;
1863 
1864 	uart_update_timeout(port, termios->c_cflag, baud);
1865 
1866 	dev_dbg(port->dev, "%s: SMR %x, cks %x, t %x, SCSCR %x\n",
1867 		__func__, smr_val, cks, t, s->cfg->scscr);
1868 
1869 	if (t >= 0) {
1870 		serial_port_out(port, SCSMR, (smr_val & ~3) | cks);
1871 		serial_port_out(port, SCBRR, t);
1872 		reg = sci_getreg(port, HSSRR);
1873 		if (reg->size)
1874 			serial_port_out(port, HSSRR, srr | HSCIF_SRE);
1875 		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
1876 	} else
1877 		serial_port_out(port, SCSMR, smr_val);
1878 
1879 	sci_init_pins(port, termios->c_cflag);
1880 
1881 	reg = sci_getreg(port, SCFCR);
1882 	if (reg->size) {
1883 		unsigned short ctrl = serial_port_in(port, SCFCR);
1884 
1885 		if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
1886 			if (termios->c_cflag & CRTSCTS)
1887 				ctrl |= SCFCR_MCE;
1888 			else
1889 				ctrl &= ~SCFCR_MCE;
1890 		}
1891 
1892 		/*
1893 		 * As we've done a sci_reset() above, ensure we don't
1894 		 * interfere with the FIFOs while toggling MCE. As the
1895 		 * reset values could still be set, simply mask them out.
1896 		 */
1897 		ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
1898 
1899 		serial_port_out(port, SCFCR, ctrl);
1900 	}
1901 
1902 	serial_port_out(port, SCSCR, s->cfg->scscr);
1903 
1904 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1905 	/*
1906 	 * Calculate delay for 1.5 DMA buffers: see
1907 	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
1908 	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
1909 	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
1910 	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
1911 	 * sizes), but it has been found experimentally that this is not
1912 	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
1913 	 * as a minimum seems to work perfectly.
1914 	 */
1915 	if (s->chan_rx) {
1916 		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
1917 			port->fifosize / 2;
1918 		dev_dbg(port->dev,
1919 			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
1920 			s->rx_timeout * 1000 / HZ, port->timeout);
1921 		if (s->rx_timeout < msecs_to_jiffies(20))
1922 			s->rx_timeout = msecs_to_jiffies(20);
1923 	}
1924 #endif
1925 
1926 	if ((termios->c_cflag & CREAD) != 0)
1927 		sci_start_rx(port);
1928 
1929 	sci_port_disable(s);
1930 }
1931 
1932 static void sci_pm(struct uart_port *port, unsigned int state,
1933 		   unsigned int oldstate)
1934 {
1935 	struct sci_port *sci_port = to_sci_port(port);
1936 
1937 	switch (state) {
1938 	case 3:
1939 		sci_port_disable(sci_port);
1940 		break;
1941 	default:
1942 		sci_port_enable(sci_port);
1943 		break;
1944 	}
1945 }
1946 
1947 static const char *sci_type(struct uart_port *port)
1948 {
1949 	switch (port->type) {
1950 	case PORT_IRDA:
1951 		return "irda";
1952 	case PORT_SCI:
1953 		return "sci";
1954 	case PORT_SCIF:
1955 		return "scif";
1956 	case PORT_SCIFA:
1957 		return "scifa";
1958 	case PORT_SCIFB:
1959 		return "scifb";
1960 	case PORT_HSCIF:
1961 		return "hscif";
1962 	}
1963 
1964 	return NULL;
1965 }
1966 
1967 static inline unsigned long sci_port_size(struct uart_port *port)
1968 {
1969 	/*
1970 	 * Pick an arbitrary size that encapsulates all of the base
1971 	 * registers by default. This can be optimized later, or derived
1972 	 * from platform resource data at such a time that ports begin to
1973 	 * behave more erratically.
1974 	 */
1975 	if (port->type == PORT_HSCIF)
1976 		return 96;
1977 	else
1978 		return 64;
1979 }
1980 
1981 static int sci_remap_port(struct uart_port *port)
1982 {
1983 	unsigned long size = sci_port_size(port);
1984 
1985 	/*
1986 	 * Nothing to do if there's already an established membase.
1987 	 */
1988 	if (port->membase)
1989 		return 0;
1990 
1991 	if (port->flags & UPF_IOREMAP) {
1992 		port->membase = ioremap_nocache(port->mapbase, size);
1993 		if (unlikely(!port->membase)) {
1994 			dev_err(port->dev, "can't remap port#%d\n", port->line);
1995 			return -ENXIO;
1996 		}
1997 	} else {
1998 		/*
1999 		 * For the simple (and majority of) cases where we don't
2000 		 * need to do any remapping, just cast the cookie
2001 		 * directly.
2002 		 */
2003 		port->membase = (void __iomem *)port->mapbase;
2004 	}
2005 
2006 	return 0;
2007 }
2008 
2009 static void sci_release_port(struct uart_port *port)
2010 {
2011 	if (port->flags & UPF_IOREMAP) {
2012 		iounmap(port->membase);
2013 		port->membase = NULL;
2014 	}
2015 
2016 	release_mem_region(port->mapbase, sci_port_size(port));
2017 }
2018 
2019 static int sci_request_port(struct uart_port *port)
2020 {
2021 	unsigned long size = sci_port_size(port);
2022 	struct resource *res;
2023 	int ret;
2024 
2025 	res = request_mem_region(port->mapbase, size, dev_name(port->dev));
2026 	if (unlikely(res == NULL))
2027 		return -EBUSY;
2028 
2029 	ret = sci_remap_port(port);
2030 	if (unlikely(ret != 0)) {
2031 		release_resource(res);
2032 		return ret;
2033 	}
2034 
2035 	return 0;
2036 }
2037 
2038 static void sci_config_port(struct uart_port *port, int flags)
2039 {
2040 	if (flags & UART_CONFIG_TYPE) {
2041 		struct sci_port *sport = to_sci_port(port);
2042 
2043 		port->type = sport->cfg->type;
2044 		sci_request_port(port);
2045 	}
2046 }
2047 
2048 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
2049 {
2050 	if (ser->baud_base < 2400)
2051 		/* No paper tape reader for Mitch.. */
2052 		return -EINVAL;
2053 
2054 	return 0;
2055 }
2056 
2057 static struct uart_ops sci_uart_ops = {
2058 	.tx_empty	= sci_tx_empty,
2059 	.set_mctrl	= sci_set_mctrl,
2060 	.get_mctrl	= sci_get_mctrl,
2061 	.start_tx	= sci_start_tx,
2062 	.stop_tx	= sci_stop_tx,
2063 	.stop_rx	= sci_stop_rx,
2064 	.enable_ms	= sci_enable_ms,
2065 	.break_ctl	= sci_break_ctl,
2066 	.startup	= sci_startup,
2067 	.shutdown	= sci_shutdown,
2068 	.set_termios	= sci_set_termios,
2069 	.pm		= sci_pm,
2070 	.type		= sci_type,
2071 	.release_port	= sci_release_port,
2072 	.request_port	= sci_request_port,
2073 	.config_port	= sci_config_port,
2074 	.verify_port	= sci_verify_port,
2075 #ifdef CONFIG_CONSOLE_POLL
2076 	.poll_get_char	= sci_poll_get_char,
2077 	.poll_put_char	= sci_poll_put_char,
2078 #endif
2079 };
2080 
2081 static int sci_init_single(struct platform_device *dev,
2082 			   struct sci_port *sci_port, unsigned int index,
2083 			   struct plat_sci_port *p, bool early)
2084 {
2085 	struct uart_port *port = &sci_port->port;
2086 	const struct resource *res;
2087 	unsigned int sampling_rate;
2088 	unsigned int i;
2089 	int ret;
2090 
2091 	sci_port->cfg	= p;
2092 
2093 	port->ops	= &sci_uart_ops;
2094 	port->iotype	= UPIO_MEM;
2095 	port->line	= index;
2096 
2097 	if (dev->num_resources) {
2098 		/* Device has resources, use them. */
2099 		res = platform_get_resource(dev, IORESOURCE_MEM, 0);
2100 		if (res == NULL)
2101 			return -ENOMEM;
2102 
2103 		port->mapbase = res->start;
2104 
2105 		for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
2106 			sci_port->irqs[i] = platform_get_irq(dev, i);
2107 
2108 		/* The SCI generates several interrupts. They can be muxed
2109 		 * together or connected to different interrupt lines. In the
2110 		 * muxed case only one interrupt resource is specified. In the
2111 		 * non-muxed case three or four interrupt resources are
2112 		 * specified, as the BRI interrupt is optional.
2113 		 */
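		/*
		 * The IRQ slots follow the SCIx_*_IRQ indexing (ERI, RXI,
		 * TXI, BRI); for the muxed case the single IRQ is simply
		 * duplicated into the remaining slots below.
		 */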
2114 		if (sci_port->irqs[0] < 0)
2115 			return -ENXIO;
2116 
2117 		if (sci_port->irqs[1] < 0) {
2118 			sci_port->irqs[1] = sci_port->irqs[0];
2119 			sci_port->irqs[2] = sci_port->irqs[0];
2120 			sci_port->irqs[3] = sci_port->irqs[0];
2121 		}
2122 	} else {
2123 		/* No resources, use old-style platform data. */
2124 		port->mapbase = p->mapbase;
2125 		for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
2126 			sci_port->irqs[i] = p->irqs[i] ? p->irqs[i] : -ENXIO;
2127 	}
2128 
2129 	if (p->regtype == SCIx_PROBE_REGTYPE) {
2130 		ret = sci_probe_regmap(p);
2131 		if (unlikely(ret))
2132 			return ret;
2133 	}
2134 
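	/*
	 * Per-variant hardware parameters: FIFO depth, the bit position of
	 * the overrun flag, and the default oversampling rate.
	 */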
2135 	switch (p->type) {
2136 	case PORT_SCIFB:
2137 		port->fifosize = 256;
2138 		sci_port->overrun_bit = 9;
2139 		sampling_rate = 16;
2140 		break;
2141 	case PORT_HSCIF:
2142 		port->fifosize = 128;
2143 		sampling_rate = 0;
2144 		sci_port->overrun_bit = 0;
2145 		break;
2146 	case PORT_SCIFA:
2147 		port->fifosize = 64;
2148 		sci_port->overrun_bit = 9;
2149 		sampling_rate = 16;
2150 		break;
2151 	case PORT_SCIF:
2152 		port->fifosize = 16;
2153 		if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
2154 			sci_port->overrun_bit = 9;
2155 			sampling_rate = 16;
2156 		} else {
2157 			sci_port->overrun_bit = 0;
2158 			sampling_rate = 32;
2159 		}
2160 		break;
2161 	default:
2162 		port->fifosize = 1;
2163 		sci_port->overrun_bit = 5;
2164 		sampling_rate = 32;
2165 		break;
2166 	}
2167 
2168 	/* The SCIFA ports on sh7723 and sh7724 need a custom sampling rate
2169 	 * that doesn't match the SoC datasheet; this should be investigated.
2170 	 * Let platform data override the sampling rate for now.
2171 	 */
2172 	sci_port->sampling_rate = p->sampling_rate ? p->sampling_rate
2173 				: sampling_rate;
2174 
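	/*
	 * For the early (earlyprintk) case, clock lookup and runtime PM
	 * setup are deferred until the port is probed properly.
	 */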
2175 	if (!early) {
2176 		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
2177 		if (IS_ERR(sci_port->iclk)) {
2178 			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
2179 			if (IS_ERR(sci_port->iclk)) {
2180 				dev_err(&dev->dev, "can't get iclk\n");
2181 				return PTR_ERR(sci_port->iclk);
2182 			}
2183 		}
2184 
2185 		/*
2186 		 * The function clock is optional; ignore it if we can't
2187 		 * find it.
2188 		 */
2189 		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
2190 		if (IS_ERR(sci_port->fclk))
2191 			sci_port->fclk = NULL;
2192 
2193 		port->dev = &dev->dev;
2194 
2195 		pm_runtime_enable(&dev->dev);
2196 	}
2197 
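	/*
	 * Set up the timer used to track BREAK conditions on the RX line
	 * (a sustained break otherwise appears as a stream of framing
	 * errors).
	 */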
2198 	sci_port->break_timer.data = (unsigned long)sci_port;
2199 	sci_port->break_timer.function = sci_break_timer;
2200 	init_timer(&sci_port->break_timer);
2201 
2202 	/*
2203 	 * Establish some sensible defaults for the error detection.
2204 	 */
2205 	sci_port->error_mask = (p->type == PORT_SCI) ?
2206 			SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
2207 
2208 	/*
2209 	 * Establish sensible defaults for overrun detection: fold the
2210 	 * overrun bit selected above into the error mask so that overruns,
2211 	 * where supported, are handled along with the other errors.
2212 	 */
2217 	sci_port->error_mask |= 1 << sci_port->overrun_bit;
2218 
2219 	port->type		= p->type;
2220 	port->flags		= UPF_FIXED_PORT | p->flags;
2221 	port->regshift		= p->regshift;
2222 
2223 	/*
2224 	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
2225 	 * for the multi-IRQ ports, as that is the interrupt the shutdown
2226 	 * path primarily needs to synchronize against.
2227 	 *
2228 	 * For the muxed case there's nothing more to do.
2229 	 */
2230 	port->irq		= sci_port->irqs[SCIx_RXI_IRQ];
2231 	port->irqflags		= 0;
2232 
2233 	port->serial_in		= sci_serial_in;
2234 	port->serial_out	= sci_serial_out;
2235 
2236 	if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0)
2237 		dev_dbg(port->dev, "DMA tx %d, rx %d\n",
2238 			p->dma_slave_tx, p->dma_slave_rx);
2239 
2240 	return 0;
2241 }
2242 
2243 static void sci_cleanup_single(struct sci_port *port)
2244 {
2245 	clk_put(port->iclk);
2246 	clk_put(port->fclk);
2247 
2248 	pm_runtime_disable(port->port.dev);
2249 }
2250 
2251 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2252 static void serial_console_putchar(struct uart_port *port, int ch)
2253 {
2254 	sci_poll_put_char(port, ch);
2255 }
2256 
2257 /*
2258  *	Print a string to the serial port trying not to disturb
2259  *	any possible real use of the port...
2260  */
2261 static void serial_console_write(struct console *co, const char *s,
2262 				 unsigned count)
2263 {
2264 	struct sci_port *sci_port = &sci_ports[co->index];
2265 	struct uart_port *port = &sci_port->port;
2266 	unsigned short bits, ctrl;
2267 	unsigned long flags;
2268 	int locked = 1;
2269 
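	/*
	 * Avoid deadlocking on the port lock: during sysrq handling the lock
	 * is already held by the interrupt path, and while an oops is in
	 * progress we only try-lock so the message still gets out even if
	 * another context died holding the lock.
	 */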
2270 	local_irq_save(flags);
2271 	if (port->sysrq)
2272 		locked = 0;
2273 	else if (oops_in_progress)
2274 		locked = spin_trylock(&port->lock);
2275 	else
2276 		spin_lock(&port->lock);
2277 
2278 	/* first save the SCSCR then disable the interrupts */
2279 	ctrl = serial_port_in(port, SCSCR);
2280 	serial_port_out(port, SCSCR, sci_port->cfg->scscr);
2281 
2282 	uart_console_write(port, s, count, serial_console_putchar);
2283 
2284 	/* wait until the FIFO is empty and the last bit has been transmitted */
2285 	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
2286 	while ((serial_port_in(port, SCxSR) & bits) != bits)
2287 		cpu_relax();
2288 
2289 	/* restore the SCSCR */
2290 	serial_port_out(port, SCSCR, ctrl);
2291 
2292 	if (locked)
2293 		spin_unlock(&port->lock);
2294 	local_irq_restore(flags);
2295 }
2296 
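/*
 * Console setup, typically driven by a boot parameter along the lines of
 * "console=ttySC0,115200n8" (illustrative); if no options are given the
 * 115200n8 defaults below are used.
 */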
2297 static int serial_console_setup(struct console *co, char *options)
2298 {
2299 	struct sci_port *sci_port;
2300 	struct uart_port *port;
2301 	int baud = 115200;
2302 	int bits = 8;
2303 	int parity = 'n';
2304 	int flow = 'n';
2305 	int ret;
2306 
2307 	/*
2308 	 * Refuse to handle any bogus ports.
2309 	 */
2310 	if (co->index < 0 || co->index >= SCI_NPORTS)
2311 		return -ENODEV;
2312 
2313 	sci_port = &sci_ports[co->index];
2314 	port = &sci_port->port;
2315 
2316 	/*
2317 	 * Refuse to handle uninitialized ports.
2318 	 */
2319 	if (!port->ops)
2320 		return -ENODEV;
2321 
2322 	ret = sci_remap_port(port);
2323 	if (unlikely(ret != 0))
2324 		return ret;
2325 
2326 	if (options)
2327 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2328 
2329 	return uart_set_options(port, co, baud, parity, bits, flow);
2330 }
2331 
2332 static struct console serial_console = {
2333 	.name		= "ttySC",
2334 	.device		= uart_console_device,
2335 	.write		= serial_console_write,
2336 	.setup		= serial_console_setup,
2337 	.flags		= CON_PRINTBUFFER,
2338 	.index		= -1,
2339 	.data		= &sci_uart_driver,
2340 };
2341 
2342 static struct console early_serial_console = {
2343 	.name           = "early_ttySC",
2344 	.write          = serial_console_write,
2345 	.flags          = CON_PRINTBUFFER,
2346 	.index		= -1,
2347 };
2348 
2349 static char early_serial_buf[32];
2350 
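/*
 * Early console probe for earlyprintk, e.g. a boot parameter along the
 * lines of "earlyprintk=sh-sci.0,115200" (illustrative); appending "keep"
 * to the options stops the boot console from being unregistered once the
 * real console takes over.
 */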
2351 static int sci_probe_earlyprintk(struct platform_device *pdev)
2352 {
2353 	struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);
2354 
2355 	if (early_serial_console.data)
2356 		return -EEXIST;
2357 
2358 	early_serial_console.index = pdev->id;
2359 
2360 	sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);
2361 
2362 	serial_console_setup(&early_serial_console, early_serial_buf);
2363 
2364 	if (!strstr(early_serial_buf, "keep"))
2365 		early_serial_console.flags |= CON_BOOT;
2366 
2367 	register_console(&early_serial_console);
2368 	return 0;
2369 }
2370 
2371 #define SCI_CONSOLE	(&serial_console)
2372 
2373 #else
2374 static inline int sci_probe_earlyprintk(struct platform_device *pdev)
2375 {
2376 	return -EINVAL;
2377 }
2378 
2379 #define SCI_CONSOLE	NULL
2380 
2381 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
2382 
2383 static char banner[] __initdata =
2384 	KERN_INFO "SuperH (H)SCI(F) driver initialized\n";
2385 
2386 static struct uart_driver sci_uart_driver = {
2387 	.owner		= THIS_MODULE,
2388 	.driver_name	= "sci",
2389 	.dev_name	= "ttySC",
2390 	.major		= SCI_MAJOR,
2391 	.minor		= SCI_MINOR_START,
2392 	.nr		= SCI_NPORTS,
2393 	.cons		= SCI_CONSOLE,
2394 };
2395 
2396 static int sci_remove(struct platform_device *dev)
2397 {
2398 	struct sci_port *port = platform_get_drvdata(dev);
2399 
2400 	cpufreq_unregister_notifier(&port->freq_transition,
2401 				    CPUFREQ_TRANSITION_NOTIFIER);
2402 
2403 	uart_remove_one_port(&sci_uart_driver, &port->port);
2404 
2405 	sci_cleanup_single(port);
2406 
2407 	return 0;
2408 }
2409 
2410 static int sci_probe_single(struct platform_device *dev,
2411 				      unsigned int index,
2412 				      struct plat_sci_port *p,
2413 				      struct sci_port *sciport)
2414 {
2415 	int ret;
2416 
2417 	/* Sanity check */
2418 	if (unlikely(index >= SCI_NPORTS)) {
2419 		dev_notice(&dev->dev, "Attempting to register port "
2420 			   "%d when only %d are available.\n",
2421 			   index+1, SCI_NPORTS);
2422 		dev_notice(&dev->dev, "Consider bumping "
2423 			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
2424 		return -EINVAL;
2425 	}
2426 
2427 	ret = sci_init_single(dev, sciport, index, p, false);
2428 	if (ret)
2429 		return ret;
2430 
2431 	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
2432 	if (ret) {
2433 		sci_cleanup_single(sciport);
2434 		return ret;
2435 	}
2436 
2437 	return 0;
2438 }
2439 
2440 static int sci_probe(struct platform_device *dev)
2441 {
2442 	struct plat_sci_port *p = dev_get_platdata(&dev->dev);
2443 	struct sci_port *sp = &sci_ports[dev->id];
2444 	int ret;
2445 
2446 	/*
2447 	 * If we've come here via earlyprintk initialization, head off to
2448 	 * the special early probe. We don't have sufficient device state
2449 	 * to make it beyond this yet.
2450 	 */
2451 	if (is_early_platform_device(dev))
2452 		return sci_probe_earlyprintk(dev);
2453 
2454 	platform_set_drvdata(dev, sp);
2455 
2456 	ret = sci_probe_single(dev, dev->id, p, sp);
2457 	if (ret)
2458 		return ret;
2459 
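	/*
	 * Register for cpufreq transitions so that the port clock rate can
	 * be re-read when the peripheral clock scales along with the CPU.
	 */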
2460 	sp->freq_transition.notifier_call = sci_notifier;
2461 
2462 	ret = cpufreq_register_notifier(&sp->freq_transition,
2463 					CPUFREQ_TRANSITION_NOTIFIER);
2464 	if (unlikely(ret < 0)) {
2465 		sci_cleanup_single(sp);
2466 		return ret;
2467 	}
2468 
2469 #ifdef CONFIG_SH_STANDARD_BIOS
2470 	sh_bios_gdb_detach();
2471 #endif
2472 
2473 	return 0;
2474 }
2475 
2476 static int sci_suspend(struct device *dev)
2477 {
2478 	struct sci_port *sport = dev_get_drvdata(dev);
2479 
2480 	if (sport)
2481 		uart_suspend_port(&sci_uart_driver, &sport->port);
2482 
2483 	return 0;
2484 }
2485 
2486 static int sci_resume(struct device *dev)
2487 {
2488 	struct sci_port *sport = dev_get_drvdata(dev);
2489 
2490 	if (sport)
2491 		uart_resume_port(&sci_uart_driver, &sport->port);
2492 
2493 	return 0;
2494 }
2495 
2496 static const struct dev_pm_ops sci_dev_pm_ops = {
2497 	.suspend	= sci_suspend,
2498 	.resume		= sci_resume,
2499 };
2500 
2501 static struct platform_driver sci_driver = {
2502 	.probe		= sci_probe,
2503 	.remove		= sci_remove,
2504 	.driver		= {
2505 		.name	= "sh-sci",
2506 		.owner	= THIS_MODULE,
2507 		.pm	= &sci_dev_pm_ops,
2508 	},
2509 };
2510 
2511 static int __init sci_init(void)
2512 {
2513 	int ret;
2514 
2515 	printk(banner);
2516 
2517 	ret = uart_register_driver(&sci_uart_driver);
2518 	if (likely(ret == 0)) {
2519 		ret = platform_driver_register(&sci_driver);
2520 		if (unlikely(ret))
2521 			uart_unregister_driver(&sci_uart_driver);
2522 	}
2523 
2524 	return ret;
2525 }
2526 
2527 static void __exit sci_exit(void)
2528 {
2529 	platform_driver_unregister(&sci_driver);
2530 	uart_unregister_driver(&sci_uart_driver);
2531 }
2532 
2533 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
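/*
 * Register as an early platform driver so that "earlyprintk" boot
 * arguments can bind to an sh-sci port before regular driver probing;
 * the option string is captured in early_serial_buf for
 * serial_console_setup().
 */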
2534 early_platform_init_buffer("earlyprintk", &sci_driver,
2535 			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
2536 #endif
2537 module_init(sci_init);
2538 module_exit(sci_exit);
2539 
2540 MODULE_LICENSE("GPL");
2541 MODULE_ALIAS("platform:sh-sci");
2542 MODULE_AUTHOR("Paul Mundt");
2543 MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver");
2544