xref: /openbmc/linux/drivers/tty/serial/mvebu-uart.c (revision 2cfe9bbe)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * ***************************************************************************
4 * Marvell Armada-3700 Serial Driver
5 * Author: Wilson Ding <dingwei@marvell.com>
6 * Copyright (C) 2015 Marvell International Ltd.
7 * ***************************************************************************
8 */
9 
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/console.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/init.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/math64.h>
19 #include <linux/of.h>
20 #include <linux/of_address.h>
21 #include <linux/of_device.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/platform_device.h>
25 #include <linux/serial.h>
26 #include <linux/serial_core.h>
27 #include <linux/slab.h>
28 #include <linux/tty.h>
29 #include <linux/tty_flip.h>
30 
31 /* Register Map */
32 #define UART_STD_RBR		0x00
33 #define UART_EXT_RBR		0x18
34 
35 #define UART_STD_TSH		0x04
36 #define UART_EXT_TSH		0x1C
37 
38 #define UART_STD_CTRL1		0x08
39 #define UART_EXT_CTRL1		0x04
40 #define  CTRL_SOFT_RST		BIT(31)
41 #define  CTRL_TXFIFO_RST	BIT(15)
42 #define  CTRL_RXFIFO_RST	BIT(14)
43 #define  CTRL_SND_BRK_SEQ	BIT(11)
44 #define  CTRL_BRK_DET_INT	BIT(3)
45 #define  CTRL_FRM_ERR_INT	BIT(2)
46 #define  CTRL_PAR_ERR_INT	BIT(1)
47 #define  CTRL_OVR_ERR_INT	BIT(0)
48 #define  CTRL_BRK_INT		(CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
49 				CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
50 
51 #define UART_STD_CTRL2		UART_STD_CTRL1
52 #define UART_EXT_CTRL2		0x20
53 #define  CTRL_STD_TX_RDY_INT	BIT(5)
54 #define  CTRL_EXT_TX_RDY_INT	BIT(6)
55 #define  CTRL_STD_RX_RDY_INT	BIT(4)
56 #define  CTRL_EXT_RX_RDY_INT	BIT(5)
57 
58 #define UART_STAT		0x0C
59 #define  STAT_TX_FIFO_EMP	BIT(13)
60 #define  STAT_TX_FIFO_FUL	BIT(11)
61 #define  STAT_TX_EMP		BIT(6)
62 #define  STAT_STD_TX_RDY	BIT(5)
63 #define  STAT_EXT_TX_RDY	BIT(15)
64 #define  STAT_STD_RX_RDY	BIT(4)
65 #define  STAT_EXT_RX_RDY	BIT(14)
66 #define  STAT_BRK_DET		BIT(3)
67 #define  STAT_FRM_ERR		BIT(2)
68 #define  STAT_PAR_ERR		BIT(1)
69 #define  STAT_OVR_ERR		BIT(0)
70 #define  STAT_BRK_ERR		(STAT_BRK_DET | STAT_FRM_ERR \
71 				 | STAT_PAR_ERR | STAT_OVR_ERR)
72 
73 /*
74  * The Marvell Armada 3700 Functional Specifications state that bit 21 of
75  * the UART Clock Control register controls UART1 and bit 20 controls UART2.
76  * In reality bit 21 controls UART2 and bit 20 controls UART1. This seems to
77  * be a documentation error, hence the following CLK_DIS macros are swapped.
78  */
79 
80 #define UART_BRDV		0x10
81 /* These bits are located in UART1 address space and control UART2 */
82 #define  UART2_CLK_DIS		BIT(21)
83 /* These bits are located in UART1 address space and control UART1 */
84 #define  UART1_CLK_DIS		BIT(20)
85 /* These bits are located in UART1 address space and control both UARTs */
86 #define  CLK_NO_XTAL		BIT(19)
87 #define  CLK_TBG_DIV1_SHIFT	15
88 #define  CLK_TBG_DIV1_MASK	0x7
89 #define  CLK_TBG_DIV1_MAX	6
90 #define  CLK_TBG_DIV2_SHIFT	12
91 #define  CLK_TBG_DIV2_MASK	0x7
92 #define  CLK_TBG_DIV2_MAX	6
93 #define  CLK_TBG_SEL_SHIFT	10
94 #define  CLK_TBG_SEL_MASK	0x3
95 /* These bits are located in both UARTs address space */
96 #define  BRDV_BAUD_MASK         0x3FF
97 #define  BRDV_BAUD_MAX		BRDV_BAUD_MASK
98 
99 #define UART_OSAMP		0x14
100 #define  OSAMP_DEFAULT_DIVISOR	16
101 #define  OSAMP_DIVISORS_MASK	0x3F3F3F3F
102 #define  OSAMP_MAX_DIVISOR	63
103 
104 #define MVEBU_NR_UARTS		2
105 
106 #define MVEBU_UART_TYPE		"mvebu-uart"
107 #define DRIVER_NAME		"mvebu_serial"
108 
109 enum {
110 	/* Either there is only one summed IRQ... */
111 	UART_IRQ_SUM = 0,
112 	/* ...or there are two separate IRQs for RX and TX */
113 	UART_RX_IRQ = 0,
114 	UART_TX_IRQ,
115 	UART_IRQ_COUNT
116 };
117 
118 /* Diverging register offsets */
119 struct uart_regs_layout {
120 	unsigned int rbr;
121 	unsigned int tsh;
122 	unsigned int ctrl;
123 	unsigned int intr;
124 };
125 
126 /* Diverging flags */
127 struct uart_flags {
128 	unsigned int ctrl_tx_rdy_int;
129 	unsigned int ctrl_rx_rdy_int;
130 	unsigned int stat_tx_rdy;
131 	unsigned int stat_rx_rdy;
132 };
133 
134 /* Driver data, a structure for each UART port */
135 struct mvebu_uart_driver_data {
136 	bool is_ext;
137 	struct uart_regs_layout regs;
138 	struct uart_flags flags;
139 };
140 
141 /* Saved registers during suspend */
142 struct mvebu_uart_pm_regs {
143 	unsigned int rbr;
144 	unsigned int tsh;
145 	unsigned int ctrl;
146 	unsigned int intr;
147 	unsigned int stat;
148 	unsigned int brdv;
149 	unsigned int osamp;
150 };
151 
152 /* MVEBU UART driver structure */
153 struct mvebu_uart {
154 	struct uart_port *port;
155 	struct clk *clk;
156 	int irq[UART_IRQ_COUNT];
157 	struct mvebu_uart_driver_data *data;
158 #if defined(CONFIG_PM)
159 	struct mvebu_uart_pm_regs pm_regs;
160 #endif /* CONFIG_PM */
161 };
162 
163 static struct mvebu_uart *to_mvuart(struct uart_port *port)
164 {
165 	return (struct mvebu_uart *)port->private_data;
166 }
167 
168 #define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)
169 
170 #define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
171 #define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
172 #define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
173 #define UART_INTR(port) (to_mvuart(port)->data->regs.intr)
174 
175 #define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
176 #define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
177 #define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
178 #define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)
179 
180 static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
181 
182 static DEFINE_SPINLOCK(mvebu_uart_lock);
183 
184 /* Core UART Driver Operations */
185 static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
186 {
187 	unsigned long flags;
188 	unsigned int st;
189 
190 	spin_lock_irqsave(&port->lock, flags);
191 	st = readl(port->membase + UART_STAT);
192 	spin_unlock_irqrestore(&port->lock, flags);
193 
194 	return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
195 }
196 
197 static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
198 {
199 	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
200 }
201 
202 static void mvebu_uart_set_mctrl(struct uart_port *port,
203 				 unsigned int mctrl)
204 {
205 /*
206  * Even if we do not support configuring the modem control lines, this
207  * function must be provided to the serial core
208  */
209 }
210 
211 static void mvebu_uart_stop_tx(struct uart_port *port)
212 {
213 	unsigned int ctl = readl(port->membase + UART_INTR(port));
214 
215 	ctl &= ~CTRL_TX_RDY_INT(port);
216 	writel(ctl, port->membase + UART_INTR(port));
217 }
218 
219 static void mvebu_uart_start_tx(struct uart_port *port)
220 {
221 	unsigned int ctl;
222 	struct circ_buf *xmit = &port->state->xmit;
223 
224 	if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) {
225 		writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
226 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
227 		port->icount.tx++;
228 	}
229 
230 	ctl = readl(port->membase + UART_INTR(port));
231 	ctl |= CTRL_TX_RDY_INT(port);
232 	writel(ctl, port->membase + UART_INTR(port));
233 }
234 
235 static void mvebu_uart_stop_rx(struct uart_port *port)
236 {
237 	unsigned int ctl;
238 
239 	ctl = readl(port->membase + UART_CTRL(port));
240 	ctl &= ~CTRL_BRK_INT;
241 	writel(ctl, port->membase + UART_CTRL(port));
242 
243 	ctl = readl(port->membase + UART_INTR(port));
244 	ctl &= ~CTRL_RX_RDY_INT(port);
245 	writel(ctl, port->membase + UART_INTR(port));
246 }
247 
248 static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
249 {
250 	unsigned int ctl;
251 	unsigned long flags;
252 
253 	spin_lock_irqsave(&port->lock, flags);
254 	ctl = readl(port->membase + UART_CTRL(port));
255 	if (brk == -1)
256 		ctl |= CTRL_SND_BRK_SEQ;
257 	else
258 		ctl &= ~CTRL_SND_BRK_SEQ;
259 	writel(ctl, port->membase + UART_CTRL(port));
260 	spin_unlock_irqrestore(&port->lock, flags);
261 }
262 
263 static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
264 {
265 	struct tty_port *tport = &port->state->port;
266 	unsigned char ch = 0;
267 	char flag = 0;
268 
269 	do {
270 		if (status & STAT_RX_RDY(port)) {
271 			ch = readl(port->membase + UART_RBR(port));
272 			ch &= 0xff;
273 			flag = TTY_NORMAL;
274 			port->icount.rx++;
275 
276 			if (status & STAT_PAR_ERR)
277 				port->icount.parity++;
278 		}
279 
280 		if (status & STAT_BRK_DET) {
281 			port->icount.brk++;
282 			status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
283 			if (uart_handle_break(port))
284 				goto ignore_char;
285 		}
286 
287 		if (status & STAT_OVR_ERR)
288 			port->icount.overrun++;
289 
290 		if (status & STAT_FRM_ERR)
291 			port->icount.frame++;
292 
293 		if (uart_handle_sysrq_char(port, ch))
294 			goto ignore_char;
295 
296 		if (status & port->ignore_status_mask & STAT_PAR_ERR)
297 			status &= ~STAT_RX_RDY(port);
298 
299 		status &= port->read_status_mask;
300 
301 		if (status & STAT_PAR_ERR)
302 			flag = TTY_PARITY;
303 
304 		status &= ~port->ignore_status_mask;
305 
306 		if (status & STAT_RX_RDY(port))
307 			tty_insert_flip_char(tport, ch, flag);
308 
309 		if (status & STAT_BRK_DET)
310 			tty_insert_flip_char(tport, 0, TTY_BREAK);
311 
312 		if (status & STAT_FRM_ERR)
313 			tty_insert_flip_char(tport, 0, TTY_FRAME);
314 
315 		if (status & STAT_OVR_ERR)
316 			tty_insert_flip_char(tport, 0, TTY_OVERRUN);
317 
318 ignore_char:
319 		status = readl(port->membase + UART_STAT);
320 	} while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));
321 
322 	tty_flip_buffer_push(tport);
323 }
324 
325 static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
326 {
327 	struct circ_buf *xmit = &port->state->xmit;
328 	unsigned int count;
329 	unsigned int st;
330 
331 	if (port->x_char) {
332 		writel(port->x_char, port->membase + UART_TSH(port));
333 		port->icount.tx++;
334 		port->x_char = 0;
335 		return;
336 	}
337 
338 	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
339 		mvebu_uart_stop_tx(port);
340 		return;
341 	}
342 
343 	for (count = 0; count < port->fifosize; count++) {
344 		writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
345 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
346 		port->icount.tx++;
347 
348 		if (uart_circ_empty(xmit))
349 			break;
350 
351 		st = readl(port->membase + UART_STAT);
352 		if (st & STAT_TX_FIFO_FUL)
353 			break;
354 	}
355 
356 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
357 		uart_write_wakeup(port);
358 
359 	if (uart_circ_empty(xmit))
360 		mvebu_uart_stop_tx(port);
361 }
362 
363 static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
364 {
365 	struct uart_port *port = (struct uart_port *)dev_id;
366 	unsigned int st = readl(port->membase + UART_STAT);
367 
368 	if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
369 		  STAT_BRK_DET))
370 		mvebu_uart_rx_chars(port, st);
371 
372 	if (st & STAT_TX_RDY(port))
373 		mvebu_uart_tx_chars(port, st);
374 
375 	return IRQ_HANDLED;
376 }
377 
378 static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
379 {
380 	struct uart_port *port = (struct uart_port *)dev_id;
381 	unsigned int st = readl(port->membase + UART_STAT);
382 
383 	if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
384 			STAT_BRK_DET))
385 		mvebu_uart_rx_chars(port, st);
386 
387 	return IRQ_HANDLED;
388 }
389 
390 static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
391 {
392 	struct uart_port *port = (struct uart_port *)dev_id;
393 	unsigned int st = readl(port->membase + UART_STAT);
394 
395 	if (st & STAT_TX_RDY(port))
396 		mvebu_uart_tx_chars(port, st);
397 
398 	return IRQ_HANDLED;
399 }
400 
401 static int mvebu_uart_startup(struct uart_port *port)
402 {
403 	struct mvebu_uart *mvuart = to_mvuart(port);
404 	unsigned int ctl;
405 	int ret;
406 
407 	writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
408 	       port->membase + UART_CTRL(port));
409 	udelay(1);
410 
411 	/* Clear the error bits of the status register before requesting the IRQ */
412 	ret = readl(port->membase + UART_STAT);
413 	ret |= STAT_BRK_ERR;
414 	writel(ret, port->membase + UART_STAT);
415 
416 	writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));
417 
418 	ctl = readl(port->membase + UART_INTR(port));
419 	ctl |= CTRL_RX_RDY_INT(port);
420 	writel(ctl, port->membase + UART_INTR(port));
421 
422 	if (!mvuart->irq[UART_TX_IRQ]) {
423 		/* Old bindings with just one interrupt (UART0 only) */
424 		ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
425 				       mvebu_uart_isr, port->irqflags,
426 				       dev_name(port->dev), port);
427 		if (ret) {
428 			dev_err(port->dev, "unable to request IRQ %d\n",
429 				mvuart->irq[UART_IRQ_SUM]);
430 			return ret;
431 		}
432 	} else {
433 		/* New bindings with an IRQ each for RX and TX (both UARTs) */
434 		ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
435 				       mvebu_uart_rx_isr, port->irqflags,
436 				       dev_name(port->dev), port);
437 		if (ret) {
438 			dev_err(port->dev, "unable to request IRQ %d\n",
439 				mvuart->irq[UART_RX_IRQ]);
440 			return ret;
441 		}
442 
443 		ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
444 				       mvebu_uart_tx_isr, port->irqflags,
445 				       dev_name(port->dev),
446 				       port);
447 		if (ret) {
448 			dev_err(port->dev, "unable to request IRQ %d\n",
449 				mvuart->irq[UART_TX_IRQ]);
450 			devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
451 				      port);
452 			return ret;
453 		}
454 	}
455 
456 	return 0;
457 }
458 
459 static void mvebu_uart_shutdown(struct uart_port *port)
460 {
461 	struct mvebu_uart *mvuart = to_mvuart(port);
462 
463 	writel(0, port->membase + UART_INTR(port));
464 
465 	if (!mvuart->irq[UART_TX_IRQ]) {
466 		devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
467 	} else {
468 		devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
469 		devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
470 	}
471 }
472 
473 static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
474 {
475 	unsigned int d_divisor, m_divisor;
476 	unsigned long flags;
477 	u32 brdv, osamp;
478 
479 	if (!port->uartclk)
480 		return 0;
481 
482 	/*
483 	 * The baudrate is derived from the UART clock thanks to divisors:
484 	 *   > d1 * d2 ("TBG divisors"): can divide only TBG clock from 1 to 6
485 	 *   > D ("baud generator"): can divide the clock from 1 to 1023
486 	 *   > M ("fractional divisor"): allows a better accuracy (from 1 to 63)
487 	 *
488 	 * Exact formulas for calculating baudrate:
489 	 *
490 	 * with default x16 scheme:
491 	 *   baudrate = xtal / (d * 16)
492 	 *   baudrate = tbg / (d1 * d2 * d * 16)
493 	 *
494 	 * with fractional divisor:
495 	 *   baudrate = 10 * xtal / (d * (3 * (m1 + m2) + 2 * (m3 + m4)))
496 	 *   baudrate = 10 * tbg / (d1*d2 * d * (3 * (m1 + m2) + 2 * (m3 + m4)))
497 	 *
498 	 * Oversampling value:
499 	 *   osamp = (m1 << 0) | (m2 << 8) | (m3 << 16) | (m4 << 24);
500 	 *
501 	 * Where m1 controls number of clock cycles per bit for bits 1,2,3;
502 	 * m2 for bits 4,5,6; m3 for bits 7,8 and m4 for bits 9,10.
503 	 *
504 	 * To simplify baudrate setup set all the M prescalers to the same
505 	 * value. For baudrates 9600 Bd and higher, it is enough to use the
506 	 * default (x16) divisor or fractional divisor with M = 63, so there
507 	 * is no need to use real fractional support (where the M prescalers
508 	 * are not equal).
509 	 *
510 	 * When all the M prescalers are zeroed then default (x16) divisor is
511 	 * used. Default x16 scheme is more stable than M (fractional divisor),
512 	 * so use M only when D divisor is not enough to derive baudrate.
513 	 *
514 	 * Member port->uartclk is either xtal clock rate or TBG clock rate
515 	 * divided by (d1 * d2). So d1 and d2 are already set by the UART clock
516 	 * driver (and UART driver itself cannot change them). Moreover they are
517 	 * shared between both UARTs.
518 	 */
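	/*
	 * Illustrative example (not from the spec; it assumes port->uartclk
	 * is a 25 MHz xtal, the usual Armada 3700 reference clock):
	 *   115200 Bd with x16: D = round(25000000 / (115200 * 16)) = 14, so
	 *   the actual rate is 25000000 / (14 * 16) ~= 111607 Bd (~3% error).
	 *   1200 Bd: x16 would need D = 1302 > 1023, so M = 63 is used and
	 *   D = round(25000000 / (1200 * 63)) = 331, giving ~1199 Bd.
	 */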
519 
520 	m_divisor = OSAMP_DEFAULT_DIVISOR;
521 	d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
522 
523 	if (d_divisor > BRDV_BAUD_MAX) {
524 		/*
525 		 * Experiments show that small M divisors are unstable.
526 		 * Use maximal possible M = 63 and calculate D divisor.
527 		 */
528 		m_divisor = OSAMP_MAX_DIVISOR;
529 		d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
530 	}
531 
532 	if (d_divisor < 1)
533 		d_divisor = 1;
534 	else if (d_divisor > BRDV_BAUD_MAX)
535 		d_divisor = BRDV_BAUD_MAX;
536 
537 	spin_lock_irqsave(&mvebu_uart_lock, flags);
538 	brdv = readl(port->membase + UART_BRDV);
539 	brdv &= ~BRDV_BAUD_MASK;
540 	brdv |= d_divisor;
541 	writel(brdv, port->membase + UART_BRDV);
542 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
543 
544 	osamp = readl(port->membase + UART_OSAMP);
545 	osamp &= ~OSAMP_DIVISORS_MASK;
546 	if (m_divisor != OSAMP_DEFAULT_DIVISOR)
547 		osamp |= (m_divisor << 0) | (m_divisor << 8) |
548 			(m_divisor << 16) | (m_divisor << 24);
549 	writel(osamp, port->membase + UART_OSAMP);
550 
551 	return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
552 }
553 
554 static void mvebu_uart_set_termios(struct uart_port *port,
555 				   struct ktermios *termios,
556 				   struct ktermios *old)
557 {
558 	unsigned long flags;
559 	unsigned int baud, min_baud, max_baud;
560 
561 	spin_lock_irqsave(&port->lock, flags);
562 
563 	port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
564 		STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
565 
566 	if (termios->c_iflag & INPCK)
567 		port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;
568 
569 	port->ignore_status_mask = 0;
570 	if (termios->c_iflag & IGNPAR)
571 		port->ignore_status_mask |=
572 			STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;
573 
574 	if ((termios->c_cflag & CREAD) == 0)
575 		port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
576 
577 	/*
578 	 * Maximal divisor is 1023 and maximal fractional divisor is 63. And
579 	 * experiments show that baudrates above 1/80 of parent clock rate are
580 	 * not stable. So disallow baudrates above 1/80 of the parent clock
581 	 * rate. If port->uartclk is not available, then
582 	 * mvebu_uart_baud_rate_set() fails, so values min_baud and max_baud
583 	 * in this case do not matter.
584 	 */
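	/*
	 * Illustrative values only, assuming a 25 MHz xtal as port->uartclk:
	 * min_baud = DIV_ROUND_UP(25000000, 1023 * 63) = 388 Bd and
	 * max_baud = 25000000 / 80 = 312500 Bd.
	 */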
585 	min_baud = DIV_ROUND_UP(port->uartclk, BRDV_BAUD_MAX *
586 				OSAMP_MAX_DIVISOR);
587 	max_baud = port->uartclk / 80;
588 
589 	baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
590 	baud = mvebu_uart_baud_rate_set(port, baud);
591 
592 	/* In case the baudrate cannot be changed, report the previous value */
593 	if (baud == 0 && old)
594 		baud = tty_termios_baud_rate(old);
595 
596 	/* Only the following flag changes are supported */
597 	if (old) {
598 		termios->c_iflag &= INPCK | IGNPAR;
599 		termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
600 		termios->c_cflag &= CREAD | CBAUD;
601 		termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
602 		termios->c_cflag |= CS8;
603 	}
604 
605 	if (baud != 0) {
606 		tty_termios_encode_baud_rate(termios, baud, baud);
607 		uart_update_timeout(port, termios->c_cflag, baud);
608 	}
609 
610 	spin_unlock_irqrestore(&port->lock, flags);
611 }
612 
613 static const char *mvebu_uart_type(struct uart_port *port)
614 {
615 	return MVEBU_UART_TYPE;
616 }
617 
618 static void mvebu_uart_release_port(struct uart_port *port)
619 {
620 	/* Nothing to do here */
621 }
622 
623 static int mvebu_uart_request_port(struct uart_port *port)
624 {
625 	return 0;
626 }
627 
628 #ifdef CONFIG_CONSOLE_POLL
629 static int mvebu_uart_get_poll_char(struct uart_port *port)
630 {
631 	unsigned int st = readl(port->membase + UART_STAT);
632 
633 	if (!(st & STAT_RX_RDY(port)))
634 		return NO_POLL_CHAR;
635 
636 	return readl(port->membase + UART_RBR(port));
637 }
638 
639 static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
640 {
641 	unsigned int st;
642 
643 	for (;;) {
644 		st = readl(port->membase + UART_STAT);
645 
646 		if (!(st & STAT_TX_FIFO_FUL))
647 			break;
648 
649 		udelay(1);
650 	}
651 
652 	writel(c, port->membase + UART_TSH(port));
653 }
654 #endif
655 
656 static const struct uart_ops mvebu_uart_ops = {
657 	.tx_empty	= mvebu_uart_tx_empty,
658 	.set_mctrl	= mvebu_uart_set_mctrl,
659 	.get_mctrl	= mvebu_uart_get_mctrl,
660 	.stop_tx	= mvebu_uart_stop_tx,
661 	.start_tx	= mvebu_uart_start_tx,
662 	.stop_rx	= mvebu_uart_stop_rx,
663 	.break_ctl	= mvebu_uart_break_ctl,
664 	.startup	= mvebu_uart_startup,
665 	.shutdown	= mvebu_uart_shutdown,
666 	.set_termios	= mvebu_uart_set_termios,
667 	.type		= mvebu_uart_type,
668 	.release_port	= mvebu_uart_release_port,
669 	.request_port	= mvebu_uart_request_port,
670 #ifdef CONFIG_CONSOLE_POLL
671 	.poll_get_char	= mvebu_uart_get_poll_char,
672 	.poll_put_char	= mvebu_uart_put_poll_char,
673 #endif
674 };
675 
676 /* Console Driver Operations */
677 
678 #ifdef CONFIG_SERIAL_MVEBU_CONSOLE
679 /* Early Console */
680 static void mvebu_uart_putc(struct uart_port *port, unsigned char c)
681 {
682 	unsigned int st;
683 
684 	for (;;) {
685 		st = readl(port->membase + UART_STAT);
686 		if (!(st & STAT_TX_FIFO_FUL))
687 			break;
688 	}
689 
690 	/* At this early stage the DT is not parsed yet, so only UART0 is used */
691 	writel(c, port->membase + UART_STD_TSH);
692 
693 	for (;;) {
694 		st = readl(port->membase + UART_STAT);
695 		if (st & STAT_TX_FIFO_EMP)
696 			break;
697 	}
698 }
699 
700 static void mvebu_uart_putc_early_write(struct console *con,
701 					const char *s,
702 					unsigned int n)
703 {
704 	struct earlycon_device *dev = con->data;
705 
706 	uart_console_write(&dev->port, s, n, mvebu_uart_putc);
707 }
708 
709 static int __init
710 mvebu_uart_early_console_setup(struct earlycon_device *device,
711 			       const char *opt)
712 {
713 	if (!device->port.membase)
714 		return -ENODEV;
715 
716 	device->con->write = mvebu_uart_putc_early_write;
717 
718 	return 0;
719 }
720 
721 EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup);
722 OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart",
723 		    mvebu_uart_early_console_setup);
724 
725 static void wait_for_xmitr(struct uart_port *port)
726 {
727 	u32 val;
728 
729 	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
730 				  (val & STAT_TX_RDY(port)), 1, 10000);
731 }
732 
733 static void wait_for_xmite(struct uart_port *port)
734 {
735 	u32 val;
736 
737 	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
738 				  (val & STAT_TX_EMP), 1, 10000);
739 }
740 
741 static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch)
742 {
743 	wait_for_xmitr(port);
744 	writel(ch, port->membase + UART_TSH(port));
745 }
746 
747 static void mvebu_uart_console_write(struct console *co, const char *s,
748 				     unsigned int count)
749 {
750 	struct uart_port *port = &mvebu_uart_ports[co->index];
751 	unsigned long flags;
752 	unsigned int ier, intr, ctl;
753 	int locked = 1;
754 
755 	if (oops_in_progress)
756 		locked = spin_trylock_irqsave(&port->lock, flags);
757 	else
758 		spin_lock_irqsave(&port->lock, flags);
759 
760 	ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
761 	intr = readl(port->membase + UART_INTR(port)) &
762 		(CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
763 	writel(0, port->membase + UART_CTRL(port));
764 	writel(0, port->membase + UART_INTR(port));
765 
766 	uart_console_write(port, s, count, mvebu_uart_console_putchar);
767 
768 	wait_for_xmite(port);
769 
770 	if (ier)
771 		writel(ier, port->membase + UART_CTRL(port));
772 
773 	if (intr) {
774 		ctl = intr | readl(port->membase + UART_INTR(port));
775 		writel(ctl, port->membase + UART_INTR(port));
776 	}
777 
778 	if (locked)
779 		spin_unlock_irqrestore(&port->lock, flags);
780 }
781 
782 static int mvebu_uart_console_setup(struct console *co, char *options)
783 {
784 	struct uart_port *port;
785 	int baud = 9600;
786 	int bits = 8;
787 	int parity = 'n';
788 	int flow = 'n';
789 
790 	if (co->index < 0 || co->index >= MVEBU_NR_UARTS)
791 		return -EINVAL;
792 
793 	port = &mvebu_uart_ports[co->index];
794 
795 	if (!port->mapbase || !port->membase) {
796 		pr_debug("console on ttyMV%i not present\n", co->index);
797 		return -ENODEV;
798 	}
799 
800 	if (options)
801 		uart_parse_options(options, &baud, &parity, &bits, &flow);
802 
803 	return uart_set_options(port, co, baud, parity, bits, flow);
804 }
805 
806 static struct uart_driver mvebu_uart_driver;
807 
808 static struct console mvebu_uart_console = {
809 	.name	= "ttyMV",
810 	.write	= mvebu_uart_console_write,
811 	.device	= uart_console_device,
812 	.setup	= mvebu_uart_console_setup,
813 	.flags	= CON_PRINTBUFFER,
814 	.index	= -1,
815 	.data	= &mvebu_uart_driver,
816 };
817 
818 static int __init mvebu_uart_console_init(void)
819 {
820 	register_console(&mvebu_uart_console);
821 	return 0;
822 }
823 
824 console_initcall(mvebu_uart_console_init);
825 
826 
827 #endif /* CONFIG_SERIAL_MVEBU_CONSOLE */
828 
829 static struct uart_driver mvebu_uart_driver = {
830 	.owner			= THIS_MODULE,
831 	.driver_name		= DRIVER_NAME,
832 	.dev_name		= "ttyMV",
833 	.nr			= MVEBU_NR_UARTS,
834 #ifdef CONFIG_SERIAL_MVEBU_CONSOLE
835 	.cons			= &mvebu_uart_console,
836 #endif
837 };
838 
839 #if defined(CONFIG_PM)
840 static int mvebu_uart_suspend(struct device *dev)
841 {
842 	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
843 	struct uart_port *port = mvuart->port;
844 	unsigned long flags;
845 
846 	uart_suspend_port(&mvebu_uart_driver, port);
847 
848 	mvuart->pm_regs.rbr = readl(port->membase + UART_RBR(port));
849 	mvuart->pm_regs.tsh = readl(port->membase + UART_TSH(port));
850 	mvuart->pm_regs.ctrl = readl(port->membase + UART_CTRL(port));
851 	mvuart->pm_regs.intr = readl(port->membase + UART_INTR(port));
852 	mvuart->pm_regs.stat = readl(port->membase + UART_STAT);
853 	spin_lock_irqsave(&mvebu_uart_lock, flags);
854 	mvuart->pm_regs.brdv = readl(port->membase + UART_BRDV);
855 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
856 	mvuart->pm_regs.osamp = readl(port->membase + UART_OSAMP);
857 
858 	device_set_wakeup_enable(dev, true);
859 
860 	return 0;
861 }
862 
863 static int mvebu_uart_resume(struct device *dev)
864 {
865 	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
866 	struct uart_port *port = mvuart->port;
867 	unsigned long flags;
868 
869 	writel(mvuart->pm_regs.rbr, port->membase + UART_RBR(port));
870 	writel(mvuart->pm_regs.tsh, port->membase + UART_TSH(port));
871 	writel(mvuart->pm_regs.ctrl, port->membase + UART_CTRL(port));
872 	writel(mvuart->pm_regs.intr, port->membase + UART_INTR(port));
873 	writel(mvuart->pm_regs.stat, port->membase + UART_STAT);
874 	spin_lock_irqsave(&mvebu_uart_lock, flags);
875 	writel(mvuart->pm_regs.brdv, port->membase + UART_BRDV);
876 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
877 	writel(mvuart->pm_regs.osamp, port->membase + UART_OSAMP);
878 
879 	uart_resume_port(&mvebu_uart_driver, port);
880 
881 	return 0;
882 }
883 
884 static const struct dev_pm_ops mvebu_uart_pm_ops = {
885 	.suspend        = mvebu_uart_suspend,
886 	.resume         = mvebu_uart_resume,
887 };
888 #endif /* CONFIG_PM */
889 
890 static const struct of_device_id mvebu_uart_of_match[];
891 
892 /* Counter to keep track of each UART port id when not using CONFIG_OF */
893 static int uart_num_counter;
894 
895 static int mvebu_uart_probe(struct platform_device *pdev)
896 {
897 	struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
898 	const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
899 							   &pdev->dev);
900 	struct uart_port *port;
901 	struct mvebu_uart *mvuart;
902 	int id, irq;
903 
904 	if (!reg) {
905 		dev_err(&pdev->dev, "no registers defined\n");
906 		return -EINVAL;
907 	}
908 
909 	/* Assume that all UART ports have a DT alias or none has */
910 	id = of_alias_get_id(pdev->dev.of_node, "serial");
911 	if (!pdev->dev.of_node || id < 0)
912 		pdev->id = uart_num_counter++;
913 	else
914 		pdev->id = id;
915 
916 	if (pdev->id >= MVEBU_NR_UARTS) {
917 		dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
918 			MVEBU_NR_UARTS);
919 		return -EINVAL;
920 	}
921 
922 	port = &mvebu_uart_ports[pdev->id];
923 
924 	spin_lock_init(&port->lock);
925 
926 	port->dev        = &pdev->dev;
927 	port->type       = PORT_MVEBU;
928 	port->ops        = &mvebu_uart_ops;
929 	port->regshift   = 0;
930 
931 	port->fifosize   = 32;
932 	port->iotype     = UPIO_MEM32;
933 	port->flags      = UPF_FIXED_PORT;
934 	port->line       = pdev->id;
935 
936 	/*
937 	 * The IRQ number is not stored in this structure because we may have
938 	 * two of them per port (RX and TX). They are kept in the ->irq[] array
939 	 * of the driver's struct mvebu_uart instead.
940 	 */
941 	port->irq        = 0;
942 	port->irqflags   = 0;
943 	port->mapbase    = reg->start;
944 
945 	port->membase = devm_ioremap_resource(&pdev->dev, reg);
946 	if (IS_ERR(port->membase))
947 		return PTR_ERR(port->membase);
948 
949 	mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
950 			      GFP_KERNEL);
951 	if (!mvuart)
952 		return -ENOMEM;
953 
954 	/* Get controller data depending on the compatible string */
955 	mvuart->data = (struct mvebu_uart_driver_data *)match->data;
956 	mvuart->port = port;
957 
958 	port->private_data = mvuart;
959 	platform_set_drvdata(pdev, mvuart);
960 
961 	/* Get fixed clock frequency */
962 	mvuart->clk = devm_clk_get(&pdev->dev, NULL);
963 	if (IS_ERR(mvuart->clk)) {
964 		if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
965 			return PTR_ERR(mvuart->clk);
966 
967 		if (IS_EXTENDED(port)) {
968 			dev_err(&pdev->dev, "unable to get UART clock\n");
969 			return PTR_ERR(mvuart->clk);
970 		}
971 	} else {
972 		if (!clk_prepare_enable(mvuart->clk))
973 			port->uartclk = clk_get_rate(mvuart->clk);
974 	}
975 
976 	/* Manage interrupts */
977 	if (platform_irq_count(pdev) == 1) {
978 		/* Old bindings: a single unnamed IRQ (UART0 only) */
979 		irq = platform_get_irq(pdev, 0);
980 		if (irq < 0)
981 			return irq;
982 
983 		mvuart->irq[UART_IRQ_SUM] = irq;
984 	} else {
985 		/*
986 		 * New bindings: named interrupts (RX, TX) for both UARTs.
987 		 * Only the uart-rx and uart-tx interrupts are used; the
988 		 * uart-sum interrupt of the UART0 port is not.
989 		 */
990 		irq = platform_get_irq_byname(pdev, "uart-rx");
991 		if (irq < 0)
992 			return irq;
993 
994 		mvuart->irq[UART_RX_IRQ] = irq;
995 
996 		irq = platform_get_irq_byname(pdev, "uart-tx");
997 		if (irq < 0)
998 			return irq;
999 
1000 		mvuart->irq[UART_TX_IRQ] = irq;
1001 	}
1002 
1003 	/* UART Soft Reset */
1004 	writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
1005 	udelay(1);
1006 	writel(0, port->membase + UART_CTRL(port));
1007 
1008 	return uart_add_one_port(&mvebu_uart_driver, port);
1009 }
1010 
1011 static struct mvebu_uart_driver_data uart_std_driver_data = {
1012 	.is_ext = false,
1013 	.regs.rbr = UART_STD_RBR,
1014 	.regs.tsh = UART_STD_TSH,
1015 	.regs.ctrl = UART_STD_CTRL1,
1016 	.regs.intr = UART_STD_CTRL2,
1017 	.flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
1018 	.flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
1019 	.flags.stat_tx_rdy = STAT_STD_TX_RDY,
1020 	.flags.stat_rx_rdy = STAT_STD_RX_RDY,
1021 };
1022 
1023 static struct mvebu_uart_driver_data uart_ext_driver_data = {
1024 	.is_ext = true,
1025 	.regs.rbr = UART_EXT_RBR,
1026 	.regs.tsh = UART_EXT_TSH,
1027 	.regs.ctrl = UART_EXT_CTRL1,
1028 	.regs.intr = UART_EXT_CTRL2,
1029 	.flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
1030 	.flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
1031 	.flags.stat_tx_rdy = STAT_EXT_TX_RDY,
1032 	.flags.stat_rx_rdy = STAT_EXT_RX_RDY,
1033 };
1034 
1035 /* Match table for of_platform binding */
1036 static const struct of_device_id mvebu_uart_of_match[] = {
1037 	{
1038 		.compatible = "marvell,armada-3700-uart",
1039 		.data = (void *)&uart_std_driver_data,
1040 	},
1041 	{
1042 		.compatible = "marvell,armada-3700-uart-ext",
1043 		.data = (void *)&uart_ext_driver_data,
1044 	},
1045 	{}
1046 };
1047 
1048 static struct platform_driver mvebu_uart_platform_driver = {
1049 	.probe	= mvebu_uart_probe,
1050 	.driver	= {
1051 		.name  = "mvebu-uart",
1052 		.of_match_table = of_match_ptr(mvebu_uart_of_match),
1053 		.suppress_bind_attrs = true,
1054 #if defined(CONFIG_PM)
1055 		.pm	= &mvebu_uart_pm_ops,
1056 #endif /* CONFIG_PM */
1057 	},
1058 };
1059 
1060 /* This code is based on the clk-fixed-factor.c driver and modified. */
1061 
1062 struct mvebu_uart_clock {
1063 	struct clk_hw clk_hw;
1064 	int clock_idx;
1065 	u32 pm_context_reg1;
1066 	u32 pm_context_reg2;
1067 };
1068 
1069 struct mvebu_uart_clock_base {
1070 	struct mvebu_uart_clock clocks[2];
1071 	unsigned int parent_rates[5];
1072 	int parent_idx;
1073 	unsigned int div;
1074 	void __iomem *reg1;
1075 	void __iomem *reg2;
1076 	bool configured;
1077 };
1078 
1079 #define PARENT_CLOCK_XTAL 4
1080 
1081 #define to_uart_clock(hw) container_of(hw, struct mvebu_uart_clock, clk_hw)
1082 #define to_uart_clock_base(uart_clock) container_of(uart_clock, \
1083 	struct mvebu_uart_clock_base, clocks[uart_clock->clock_idx])
1084 
1085 static int mvebu_uart_clock_prepare(struct clk_hw *hw)
1086 {
1087 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1088 	struct mvebu_uart_clock_base *uart_clock_base =
1089 						to_uart_clock_base(uart_clock);
1090 	unsigned int prev_clock_idx, prev_clock_rate, prev_d1d2;
1091 	unsigned int parent_clock_idx, parent_clock_rate;
1092 	unsigned long flags;
1093 	unsigned int d1, d2;
1094 	u64 divisor;
1095 	u32 val;
1096 
1097 	/*
1098 	 * This function just reconfigures UART Clock Control register (located
1099 	 * in UART1 address space which controls both UART1 and UART2) to
1100 	 * selected UART base clock and recalculates current UART1/UART2
1101 	 * divisors in their address spaces, so that final baudrate will not be
1102 	 * changed by switching the UART parent clock. This is required because
1103 	 * otherwise the kernel's boot log would stop working - the UART
1104 	 * baudrate must not change during this setup. It is a one-time
1105 	 * operation: it executes only once, sets `configured` to true and is
1106 	 * skipped on subsequent calls. Because this UART Clock Control
1107 	 * register (UART_BRDV) is shared between UART1 baudrate function,
1108 	 * UART1 clock selector and UART2 clock selector, every access to
1109 	 * UART_BRDV (reg1) needs to be protected by a lock.
1110 	 */
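	/*
	 * Worked example of the rescaling below (the clock rates are only
	 * assumptions): switching from a 25 MHz xtal (prev_d1d2 = 1) to a
	 * 1 GHz TBG with d1 * d2 = 2 scales D by 1000000000 / (25000000 * 2),
	 * i.e. by 20. A divisor D = 14 becomes D = 280, and
	 * 1000000000 / (2 * 280 * 16) is the same ~111607 Bd as
	 * 25000000 / (14 * 16), so the baudrate is preserved.
	 */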
1111 
1112 	spin_lock_irqsave(&mvebu_uart_lock, flags);
1113 
1114 	if (uart_clock_base->configured) {
1115 		spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1116 		return 0;
1117 	}
1118 
1119 	parent_clock_idx = uart_clock_base->parent_idx;
1120 	parent_clock_rate = uart_clock_base->parent_rates[parent_clock_idx];
1121 
1122 	val = readl(uart_clock_base->reg1);
1123 
1124 	if (uart_clock_base->div > CLK_TBG_DIV1_MAX) {
1125 		d1 = CLK_TBG_DIV1_MAX;
1126 		d2 = uart_clock_base->div / CLK_TBG_DIV1_MAX;
1127 	} else {
1128 		d1 = uart_clock_base->div;
1129 		d2 = 1;
1130 	}
1131 
1132 	if (val & CLK_NO_XTAL) {
1133 		prev_clock_idx = (val >> CLK_TBG_SEL_SHIFT) & CLK_TBG_SEL_MASK;
1134 		prev_d1d2 = ((val >> CLK_TBG_DIV1_SHIFT) & CLK_TBG_DIV1_MASK) *
1135 			    ((val >> CLK_TBG_DIV2_SHIFT) & CLK_TBG_DIV2_MASK);
1136 	} else {
1137 		prev_clock_idx = PARENT_CLOCK_XTAL;
1138 		prev_d1d2 = 1;
1139 	}
1140 
1141 	/* Note that uart_clock_base->parent_rates[i] may not be available */
1142 	prev_clock_rate = uart_clock_base->parent_rates[prev_clock_idx];
1143 
1144 	/* Recalculate UART1 divisor so UART1 baudrate does not change */
1145 	if (prev_clock_rate) {
1146 		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
1147 						parent_clock_rate * prev_d1d2,
1148 						prev_clock_rate * d1 * d2);
1149 		if (divisor < 1)
1150 			divisor = 1;
1151 		else if (divisor > BRDV_BAUD_MAX)
1152 			divisor = BRDV_BAUD_MAX;
1153 		val = (val & ~BRDV_BAUD_MASK) | divisor;
1154 	}
1155 
1156 	if (parent_clock_idx != PARENT_CLOCK_XTAL) {
1157 		/* Do not use XTAL, select TBG clock and TBG d1 * d2 divisors */
1158 		val |= CLK_NO_XTAL;
1159 		val &= ~(CLK_TBG_DIV1_MASK << CLK_TBG_DIV1_SHIFT);
1160 		val |= d1 << CLK_TBG_DIV1_SHIFT;
1161 		val &= ~(CLK_TBG_DIV2_MASK << CLK_TBG_DIV2_SHIFT);
1162 		val |= d2 << CLK_TBG_DIV2_SHIFT;
1163 		val &= ~(CLK_TBG_SEL_MASK << CLK_TBG_SEL_SHIFT);
1164 		val |= parent_clock_idx << CLK_TBG_SEL_SHIFT;
1165 	} else {
1166 		/* Use XTAL, TBG bits are then ignored */
1167 		val &= ~CLK_NO_XTAL;
1168 	}
1169 
1170 	writel(val, uart_clock_base->reg1);
1171 
1172 	/* Recalculate UART2 divisor so UART2 baudrate does not change */
1173 	if (prev_clock_rate) {
1174 		val = readl(uart_clock_base->reg2);
1175 		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
1176 						parent_clock_rate * prev_d1d2,
1177 						prev_clock_rate * d1 * d2);
1178 		if (divisor < 1)
1179 			divisor = 1;
1180 		else if (divisor > BRDV_BAUD_MAX)
1181 			divisor = BRDV_BAUD_MAX;
1182 		val = (val & ~BRDV_BAUD_MASK) | divisor;
1183 		writel(val, uart_clock_base->reg2);
1184 	}
1185 
1186 	uart_clock_base->configured = true;
1187 
1188 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1189 
1190 	return 0;
1191 }
1192 
1193 static int mvebu_uart_clock_enable(struct clk_hw *hw)
1194 {
1195 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1196 	struct mvebu_uart_clock_base *uart_clock_base =
1197 						to_uart_clock_base(uart_clock);
1198 	unsigned long flags;
1199 	u32 val;
1200 
1201 	spin_lock_irqsave(&mvebu_uart_lock, flags);
1202 
1203 	val = readl(uart_clock_base->reg1);
1204 
1205 	if (uart_clock->clock_idx == 0)
1206 		val &= ~UART1_CLK_DIS;
1207 	else
1208 		val &= ~UART2_CLK_DIS;
1209 
1210 	writel(val, uart_clock_base->reg1);
1211 
1212 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1213 
1214 	return 0;
1215 }
1216 
1217 static void mvebu_uart_clock_disable(struct clk_hw *hw)
1218 {
1219 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1220 	struct mvebu_uart_clock_base *uart_clock_base =
1221 						to_uart_clock_base(uart_clock);
1222 	unsigned long flags;
1223 	u32 val;
1224 
1225 	spin_lock_irqsave(&mvebu_uart_lock, flags);
1226 
1227 	val = readl(uart_clock_base->reg1);
1228 
1229 	if (uart_clock->clock_idx == 0)
1230 		val |= UART1_CLK_DIS;
1231 	else
1232 		val |= UART2_CLK_DIS;
1233 
1234 	writel(val, uart_clock_base->reg1);
1235 
1236 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1237 }
1238 
1239 static int mvebu_uart_clock_is_enabled(struct clk_hw *hw)
1240 {
1241 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1242 	struct mvebu_uart_clock_base *uart_clock_base =
1243 						to_uart_clock_base(uart_clock);
1244 	u32 val;
1245 
1246 	val = readl(uart_clock_base->reg1);
1247 
1248 	if (uart_clock->clock_idx == 0)
1249 		return !(val & UART1_CLK_DIS);
1250 	else
1251 		return !(val & UART2_CLK_DIS);
1252 }
1253 
1254 static int mvebu_uart_clock_save_context(struct clk_hw *hw)
1255 {
1256 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1257 	struct mvebu_uart_clock_base *uart_clock_base =
1258 						to_uart_clock_base(uart_clock);
1259 	unsigned long flags;
1260 
1261 	spin_lock_irqsave(&mvebu_uart_lock, flags);
1262 	uart_clock->pm_context_reg1 = readl(uart_clock_base->reg1);
1263 	uart_clock->pm_context_reg2 = readl(uart_clock_base->reg2);
1264 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1265 
1266 	return 0;
1267 }
1268 
1269 static void mvebu_uart_clock_restore_context(struct clk_hw *hw)
1270 {
1271 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1272 	struct mvebu_uart_clock_base *uart_clock_base =
1273 						to_uart_clock_base(uart_clock);
1274 	unsigned long flags;
1275 
1276 	spin_lock_irqsave(&mvebu_uart_lock, flags);
1277 	writel(uart_clock->pm_context_reg1, uart_clock_base->reg1);
1278 	writel(uart_clock->pm_context_reg2, uart_clock_base->reg2);
1279 	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1280 }
1281 
1282 static unsigned long mvebu_uart_clock_recalc_rate(struct clk_hw *hw,
1283 						  unsigned long parent_rate)
1284 {
1285 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1286 	struct mvebu_uart_clock_base *uart_clock_base =
1287 						to_uart_clock_base(uart_clock);
1288 
1289 	return parent_rate / uart_clock_base->div;
1290 }
1291 
1292 static long mvebu_uart_clock_round_rate(struct clk_hw *hw, unsigned long rate,
1293 					unsigned long *parent_rate)
1294 {
1295 	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1296 	struct mvebu_uart_clock_base *uart_clock_base =
1297 						to_uart_clock_base(uart_clock);
1298 
1299 	return *parent_rate / uart_clock_base->div;
1300 }
1301 
1302 static int mvebu_uart_clock_set_rate(struct clk_hw *hw, unsigned long rate,
1303 				     unsigned long parent_rate)
1304 {
1305 	/*
1306 	 * We must report success but we can do so unconditionally because
1307 	 * mvebu_uart_clock_round_rate returns values that ensure this call is a
1308 	 * nop.
1309 	 */
1310 
1311 	return 0;
1312 }
1313 
1314 static const struct clk_ops mvebu_uart_clock_ops = {
1315 	.prepare = mvebu_uart_clock_prepare,
1316 	.enable = mvebu_uart_clock_enable,
1317 	.disable = mvebu_uart_clock_disable,
1318 	.is_enabled = mvebu_uart_clock_is_enabled,
1319 	.save_context = mvebu_uart_clock_save_context,
1320 	.restore_context = mvebu_uart_clock_restore_context,
1321 	.round_rate = mvebu_uart_clock_round_rate,
1322 	.set_rate = mvebu_uart_clock_set_rate,
1323 	.recalc_rate = mvebu_uart_clock_recalc_rate,
1324 };
1325 
1326 static int mvebu_uart_clock_register(struct device *dev,
1327 				     struct mvebu_uart_clock *uart_clock,
1328 				     const char *name,
1329 				     const char *parent_name)
1330 {
1331 	struct clk_init_data init = { };
1332 
1333 	uart_clock->clk_hw.init = &init;
1334 
1335 	init.name = name;
1336 	init.ops = &mvebu_uart_clock_ops;
1337 	init.flags = 0;
1338 	init.num_parents = 1;
1339 	init.parent_names = &parent_name;
1340 
1341 	return devm_clk_hw_register(dev, &uart_clock->clk_hw);
1342 }
1343 
1344 static int mvebu_uart_clock_probe(struct platform_device *pdev)
1345 {
1346 	static const char *const uart_clk_names[] = { "uart_1", "uart_2" };
1347 	static const char *const parent_clk_names[] = { "TBG-A-P", "TBG-B-P",
1348 							"TBG-A-S", "TBG-B-S",
1349 							"xtal" };
1350 	struct clk *parent_clks[ARRAY_SIZE(parent_clk_names)];
1351 	struct mvebu_uart_clock_base *uart_clock_base;
1352 	struct clk_hw_onecell_data *hw_clk_data;
1353 	struct device *dev = &pdev->dev;
1354 	int i, parent_clk_idx, ret;
1355 	unsigned long div, rate;
1356 	struct resource *res;
1357 	unsigned int d1, d2;
1358 
1359 	BUILD_BUG_ON(ARRAY_SIZE(uart_clk_names) !=
1360 		     ARRAY_SIZE(uart_clock_base->clocks));
1361 	BUILD_BUG_ON(ARRAY_SIZE(parent_clk_names) !=
1362 		     ARRAY_SIZE(uart_clock_base->parent_rates));
1363 
1364 	uart_clock_base = devm_kzalloc(dev,
1365 				       sizeof(*uart_clock_base),
1366 				       GFP_KERNEL);
1367 	if (!uart_clock_base)
1368 		return -ENOMEM;
1369 
1370 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1371 	if (!res) {
1372 		dev_err(dev, "Couldn't get first register\n");
1373 		return -ENOENT;
1374 	}
1375 
1376 	/*
1377 	 * UART Clock Control register (reg1 / UART_BRDV) is in the address
1378 	 * space of UART1 (standard UART variant), controls parent clock and
1379 	 * dividers for both UART1 and UART2 and is supplied via DT as the first
1380 	 * resource. Therefore use devm_ioremap() rather than
1381 	 * devm_ioremap_resource() to avoid conflicts with the UART1 driver.
1382 	 * Access to UART_BRDV is protected by a lock shared with the UART driver.
1383 	 */
1384 	uart_clock_base->reg1 = devm_ioremap(dev, res->start,
1385 					     resource_size(res));
1386 	if (!uart_clock_base->reg1)
1387 		return -ENOMEM;
1388 
1389 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1390 	if (!res) {
1391 		dev_err(dev, "Couldn't get second register\n");
1392 		return -ENOENT;
1393 	}
1394 
1395 	/*
1396 	 * UART 2 Baud Rate Divisor register (reg2 / UART_BRDV) is in address
1397 	 * space of UART2 (extended UART variant), controls only one UART2
1398 	 * specific divider and is supplied via DT as second resource.
1399 	 * Therefore use devm_ioremap() rather than devm_ioremap_resource() to
1400 	 * avoid conflicts with the UART2 driver. Access to UART_BRDV is
1401 	 * protected by a lock shared with the UART driver.
1402 	 */
1403 	uart_clock_base->reg2 = devm_ioremap(dev, res->start,
1404 					     resource_size(res));
1405 	if (!uart_clock_base->reg2)
1406 		return -ENOMEM;
1407 
1408 	hw_clk_data = devm_kzalloc(dev,
1409 				   struct_size(hw_clk_data, hws,
1410 					       ARRAY_SIZE(uart_clk_names)),
1411 				   GFP_KERNEL);
1412 	if (!hw_clk_data)
1413 		return -ENOMEM;
1414 
1415 	hw_clk_data->num = ARRAY_SIZE(uart_clk_names);
1416 	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
1417 		hw_clk_data->hws[i] = &uart_clock_base->clocks[i].clk_hw;
1418 		uart_clock_base->clocks[i].clock_idx = i;
1419 	}
1420 
1421 	parent_clk_idx = -1;
1422 
1423 	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
1424 		parent_clks[i] = devm_clk_get(dev, parent_clk_names[i]);
1425 		if (IS_ERR(parent_clks[i])) {
1426 			if (PTR_ERR(parent_clks[i]) == -EPROBE_DEFER)
1427 				return -EPROBE_DEFER;
1428 			dev_warn(dev, "Couldn't get the parent clock %s: %ld\n",
1429 				 parent_clk_names[i], PTR_ERR(parent_clks[i]));
1430 			continue;
1431 		}
1432 
1433 		ret = clk_prepare_enable(parent_clks[i]);
1434 		if (ret) {
1435 			dev_warn(dev, "Couldn't enable parent clock %s: %d\n",
1436 				 parent_clk_names[i], ret);
1437 			continue;
1438 		}
1439 		rate = clk_get_rate(parent_clks[i]);
1440 		uart_clock_base->parent_rates[i] = rate;
1441 
1442 		if (i != PARENT_CLOCK_XTAL) {
1443 			/*
1444 			 * Calculate the smallest TBG d1 and d2 divisors that
1445 			 * still can provide 9600 baudrate.
1446 			 */
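			/*
			 * For instance (assumed rate): a 1 GHz TBG gives
			 * d1 = DIV_ROUND_UP(1000000000, 618710400) = 2 and
			 * d2 = 1, since 9600 * 63 * 1023 = 618710400.
			 */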
1447 			d1 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
1448 					  BRDV_BAUD_MAX);
1449 			if (d1 < 1)
1450 				d1 = 1;
1451 			else if (d1 > CLK_TBG_DIV1_MAX)
1452 				d1 = CLK_TBG_DIV1_MAX;
1453 
1454 			d2 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
1455 					  BRDV_BAUD_MAX * d1);
1456 			if (d2 < 1)
1457 				d2 = 1;
1458 			else if (d2 > CLK_TBG_DIV2_MAX)
1459 				d2 = CLK_TBG_DIV2_MAX;
1460 		} else {
1461 			/*
1462 			 * When UART clock uses XTAL clock as a source then it
1463 			 * is not possible to use d1 and d2 divisors.
1464 			 */
1465 			d1 = d2 = 1;
1466 		}
1467 
1468 		/* Skip clock source which cannot provide 9600 baudrate */
1469 		if (rate > 9600 * OSAMP_MAX_DIVISOR * BRDV_BAUD_MAX * d1 * d2)
1470 			continue;
1471 
1472 		/*
1473 		 * Choose TBG clock source with the smallest divisors. Use XTAL
1474 		 * clock source only in case TBG is not available as XTAL cannot
1475 		 * be used for baudrates higher than 230400.
1476 		 */
1477 		if (parent_clk_idx == -1 ||
1478 		    (i != PARENT_CLOCK_XTAL && div > d1 * d2)) {
1479 			parent_clk_idx = i;
1480 			div = d1 * d2;
1481 		}
1482 	}
1483 
1484 	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
1485 		if (i == parent_clk_idx || IS_ERR(parent_clks[i]))
1486 			continue;
1487 		clk_disable_unprepare(parent_clks[i]);
1488 		devm_clk_put(dev, parent_clks[i]);
1489 	}
1490 
1491 	if (parent_clk_idx == -1) {
1492 		dev_err(dev, "No usable parent clock\n");
1493 		return -ENOENT;
1494 	}
1495 
1496 	uart_clock_base->parent_idx = parent_clk_idx;
1497 	uart_clock_base->div = div;
1498 
1499 	dev_notice(dev, "Using parent clock %s as base UART clock\n",
1500 		   __clk_get_name(parent_clks[parent_clk_idx]));
1501 
1502 	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
1503 		ret = mvebu_uart_clock_register(dev,
1504 				&uart_clock_base->clocks[i],
1505 				uart_clk_names[i],
1506 				__clk_get_name(parent_clks[parent_clk_idx]));
1507 		if (ret) {
1508 			dev_err(dev, "Can't register UART clock %d: %d\n",
1509 				i, ret);
1510 			return ret;
1511 		}
1512 	}
1513 
1514 	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
1515 					   hw_clk_data);
1516 }
1517 
1518 static const struct of_device_id mvebu_uart_clock_of_match[] = {
1519 	{ .compatible = "marvell,armada-3700-uart-clock", },
1520 	{ }
1521 };
1522 
1523 static struct platform_driver mvebu_uart_clock_platform_driver = {
1524 	.probe = mvebu_uart_clock_probe,
1525 	.driver		= {
1526 		.name	= "mvebu-uart-clock",
1527 		.of_match_table = mvebu_uart_clock_of_match,
1528 	},
1529 };
1530 
1531 static int __init mvebu_uart_init(void)
1532 {
1533 	int ret;
1534 
1535 	ret = uart_register_driver(&mvebu_uart_driver);
1536 	if (ret)
1537 		return ret;
1538 
1539 	ret = platform_driver_register(&mvebu_uart_clock_platform_driver);
1540 	if (ret) {
1541 		uart_unregister_driver(&mvebu_uart_driver);
1542 		return ret;
1543 	}
1544 
1545 	ret = platform_driver_register(&mvebu_uart_platform_driver);
1546 	if (ret) {
1547 		platform_driver_unregister(&mvebu_uart_clock_platform_driver);
1548 		uart_unregister_driver(&mvebu_uart_driver);
1549 		return ret;
1550 	}
1551 
1552 	return 0;
1553 }
1554 arch_initcall(mvebu_uart_init);
1555