/*
 * Driver for Motorola IMX serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Author: Sascha Hauer <sascha@saschahauer.de>
 * Copyright (C) 2004 Pengutronix
 *
 * Copyright (C) 2009 emlix GmbH
 * Author: Fabian Godehardt (added IrDA support for iMX)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * [29-Mar-2005] Mike Lee
 * Added hardware handshake
 */

#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <linux/platform_data/serial-imx.h>
#include <linux/platform_data/dma-imx.h>

/* Register definitions (offsets from port->membase) */
#define URXD0 0x0  /* Receiver Register */
#define URTX0 0x40 /* Transmitter Register */
#define UCR1  0x80 /* Control Register 1 */
#define UCR2  0x84 /* Control Register 2 */
#define UCR3  0x88 /* Control Register 3 */
#define UCR4  0x8c /* Control Register 4 */
#define UFCR  0x90 /* FIFO Control Register */
#define USR1  0x94 /* Status Register 1 */
#define USR2  0x98 /* Status Register 2 */
#define UESC  0x9c /* Escape Character Register */
#define UTIM  0xa0 /* Escape Timer Register */
#define UBIR  0xa4 /* BRM Incremental Register */
#define UBMR  0xa8 /* BRM Modulator Register */
#define UBRC  0xac /* Baud Rate Count Register */
#define IMX21_ONEMS 0xb0 /* One Millisecond register */
#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx */

/* UART Control Register Bit Fields.*/
#define URXD_CHARRDY	(1<<15)
#define URXD_ERR	(1<<14)
#define URXD_OVRRUN	(1<<13)
#define URXD_FRMERR	(1<<12)
#define URXD_BRK	(1<<11)
#define URXD_PRERR	(1<<10)
#define URXD_RX_DATA	(0xFF<<0)
#define UCR1_ADEN	(1<<15) /* Auto detect interrupt */
#define UCR1_ADBR	(1<<14) /* Auto detect baud rate */
#define UCR1_TRDYEN	(1<<13) /* Transmitter ready interrupt enable */
#define UCR1_IDEN	(1<<12) /* Idle condition interrupt */
#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
#define UCR1_RRDYEN	(1<<9)	/* Recv ready interrupt enable */
#define UCR1_RDMAEN	(1<<8)	/* Recv ready DMA enable */
#define UCR1_IREN	(1<<7)	/* Infrared interface enable */
#define UCR1_TXMPTYEN	(1<<6)	/* Transmitter empty interrupt enable */
#define UCR1_RTSDEN	(1<<5)	/* RTS delta interrupt enable */
#define UCR1_SNDBRK	(1<<4)	/* Send break */
#define UCR1_TDMAEN	(1<<3)	/* Transmitter ready DMA enable */
#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
#define UCR1_ATDMAEN	(1<<2)	/* Aging DMA Timer Enable (non-i.mx1) */
#define UCR1_DOZE	(1<<1)	/* Doze */
#define UCR1_UARTEN	(1<<0)	/* UART enabled */
#define UCR2_ESCI	(1<<15)	/* Escape seq interrupt enable */
#define UCR2_IRTS	(1<<14)	/* Ignore RTS pin */
#define UCR2_CTSC	(1<<13)	/* CTS pin control */
#define UCR2_CTS	(1<<12)	/* Clear to send */
#define UCR2_ESCEN	(1<<11)	/* Escape enable */
#define UCR2_PREN	(1<<8)	/* Parity enable */
#define UCR2_PROE	(1<<7)	/* Parity odd/even */
#define UCR2_STPB	(1<<6)	/* Stop */
#define UCR2_WS		(1<<5)	/* Word size */
#define UCR2_RTSEN	(1<<4)	/* Request to send interrupt enable */
#define UCR2_ATEN	(1<<3)	/* Aging Timer Enable */
#define UCR2_TXEN	(1<<2)	/* Transmitter enabled */
#define UCR2_RXEN	(1<<1)	/* Receiver enabled */
#define UCR2_SRST	(1<<0)	/* SW reset */
#define UCR3_DTREN	(1<<13) /* DTR interrupt enable */
#define UCR3_PARERREN	(1<<12) /* Parity enable */
#define UCR3_FRAERREN	(1<<11) /* Frame error interrupt enable */
#define UCR3_DSR	(1<<10) /* Data set ready */
#define UCR3_DCD	(1<<9)	/* Data carrier detect */
#define UCR3_RI		(1<<8)	/* Ring indicator */
#define UCR3_ADNIMP	(1<<7)	/* Autobaud Detection Not Improved */
#define UCR3_RXDSEN	(1<<6)	/* Receive status interrupt enable */
#define UCR3_AIRINTEN	(1<<5)	/* Async IR wake interrupt enable */
#define UCR3_AWAKEN	(1<<4)	/* Async wake interrupt enable */
#define IMX21_UCR3_RXDMUXSEL	(1<<2)	/* RXD Muxed Input Select */
#define UCR3_INVT	(1<<1)	/* Inverted Infrared transmission */
#define UCR3_BPEN	(1<<0)	/* Preset registers enable */
#define UCR4_CTSTL_SHF	10	/* CTS trigger level shift */
#define UCR4_CTSTL_MASK	0x3F	/* CTS trigger is 6 bits wide */
#define UCR4_INVR	(1<<9)	/* Inverted infrared reception */
#define UCR4_ENIRI	(1<<8)	/* Serial infrared interrupt enable */
#define UCR4_WKEN	(1<<7)	/* Wake interrupt enable */
#define UCR4_REF16	(1<<6)	/* Ref freq 16 MHz */
#define UCR4_IDDMAEN    (1<<6)  /* DMA IDLE Condition Detected */
#define UCR4_IRSC	(1<<5)	/* IR special case */
#define UCR4_TCEN	(1<<3)	/* Transmit complete interrupt enable */
#define UCR4_BKEN	(1<<2)	/* Break condition interrupt enable */
#define UCR4_OREN	(1<<1)	/* Receiver overrun interrupt enable */
#define UCR4_DREN	(1<<0)	/* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF	0	/* Receiver trigger level shift */
#define UFCR_DCEDTE	(1<<6)	/* DCE/DTE mode select */
#define UFCR_RFDIV	(7<<7)	/* Reference freq divider mask */
#define UFCR_RFDIV_REG(x)	(((x) < 7 ? 6 - (x) : 6) << 7)
#define UFCR_TXTL_SHF	10	/* Transmitter trigger level shift */
#define USR1_PARITYERR	(1<<15) /* Parity error interrupt flag */
#define USR1_RTSS	(1<<14) /* RTS pin status */
#define USR1_TRDY	(1<<13) /* Transmitter ready interrupt/dma flag */
#define USR1_RTSD	(1<<12) /* RTS delta */
#define USR1_ESCF	(1<<11) /* Escape seq interrupt flag */
#define USR1_FRAMERR	(1<<10) /* Frame error interrupt flag */
#define USR1_RRDY	(1<<9)	 /* Receiver ready interrupt/dma flag */
#define USR1_TIMEOUT	(1<<7)	 /* Receive timeout interrupt status */
#define USR1_RXDS	 (1<<6)	 /* Receiver idle interrupt flag */
#define USR1_AIRINT	 (1<<5)	 /* Async IR wake interrupt flag */
#define USR1_AWAKE	 (1<<4)	 /* Async wake interrupt flag */
#define USR2_ADET	 (1<<15) /* Auto baud rate detect complete */
#define USR2_TXFE	 (1<<14) /* Transmit buffer FIFO empty */
#define USR2_DTRF	 (1<<13) /* DTR edge interrupt flag */
#define USR2_IDLE	 (1<<12) /* Idle condition */
#define USR2_IRINT	 (1<<8)	 /* Serial infrared interrupt flag */
#define USR2_WAKE	 (1<<7)	 /* Wake */
#define USR2_RTSF	 (1<<4)	 /* RTS edge interrupt flag */
#define USR2_TXDC	 (1<<3)	 /* Transmitter complete */
#define USR2_BRCD	 (1<<2)	 /* Break condition */
#define USR2_ORE	(1<<1)	 /* Overrun error */
#define USR2_RDR	(1<<0)	 /* Recv data ready */
#define UTS_FRCPERR	(1<<13) /* Force parity error */
#define UTS_LOOP	(1<<12) /* Loop tx and rx */
#define UTS_TXEMPTY	 (1<<6)	 /* TxFIFO empty */
#define UTS_RXEMPTY	 (1<<5)	 /* RxFIFO empty */
#define UTS_TXFULL	 (1<<4)	 /* TxFIFO full */
#define UTS_RXFULL	 (1<<3)	 /* RxFIFO full */
#define UTS_SOFTRST	 (1<<0)	 /* Software reset */

/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_IMX_MAJOR	207
#define MINOR_START		16
#define DEV_NAME		"ttymxc"

/*
 * This determines how often we check the modem status signals
 * for any change.  They generally aren't connected to an IRQ
 * so we have to poll them.  We also check immediately before
 * filling the TX fifo in case CTS has been dropped.
 */
#define MCTRL_TIMEOUT	(250*HZ/1000)

#define DRIVER_NAME "IMX-uart"

#define UART_NR 8

/* i.mx21 type uart runs on all i.mx except i.mx1 */
enum imx_uart_type {
	IMX1_UART,
	IMX21_UART,
	IMX6Q_UART,
};

/* device type dependent stuff */
struct imx_uart_data {
	unsigned uts_reg;		/* offset of the UART Test Register */
	enum imx_uart_type devtype;
};

/* Per-port driver state; embeds the generic uart_port as first member. */
struct imx_port {
	struct uart_port	port;
	struct timer_list	timer;		/* modem-status polling timer */
	unsigned int		old_status;	/* last polled mctrl state */
	int			txirq, rxirq, rtsirq; /* i.mx1 has 3 IRQs */
	unsigned int		have_rtscts:1;
	unsigned int		dte_mode:1;
	unsigned int		use_irda:1;
	unsigned int		irda_inv_rx:1;
	unsigned int		irda_inv_tx:1;
	unsigned short		trcv_delay; /* transceiver delay */
	struct clk		*clk_ipg;
	struct clk		*clk_per;
	const struct imx_uart_data *devdata;

	/* DMA fields */
	unsigned int		dma_is_inited:1;
	unsigned int		dma_is_enabled:1;
	unsigned int		dma_is_rxing:1;
	unsigned int		dma_is_txing:1;
	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
	struct scatterlist	rx_sgl, tx_sgl[2];
	void			*rx_buf;
	unsigned int		tx_bytes;	/* bytes queued in current TX DMA */
	unsigned int		dma_tx_nents;
	wait_queue_head_t	dma_wait;	/* shutdown waits for DMA here */
};

/* Snapshot of the three control registers, for console/poll code. */
struct imx_port_ucrs {
	unsigned int	ucr1;
	unsigned int	ucr2;
	unsigned int	ucr3;
};

#ifdef CONFIG_IRDA
#define USE_IRDA(sport)	((sport)->use_irda)
#else
#define USE_IRDA(sport)	(0)
#endif

/* Per-devtype data, indexed by enum imx_uart_type. */
static struct imx_uart_data imx_uart_devdata[] = {
	[IMX1_UART] = {
		.uts_reg = IMX1_UTS,
		.devtype = IMX1_UART,
	},
	[IMX21_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX21_UART,
	},
	[IMX6Q_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX6Q_UART,
	},
};

static struct platform_device_id imx_uart_devtype[] = {
	{
		.name = "imx1-uart",
		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
	}, {
		.name = "imx21-uart",
		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
	}, {
		.name = "imx6q-uart",
		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_uart_devtype);

static struct of_device_id imx_uart_dt_ids[] = {
	{ .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
	{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
	{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);

/* Offset of the UART Test Register for this port's devtype. */
static inline unsigned uts_reg(struct imx_port *sport)
{
	return sport->devdata->uts_reg;
}

static inline int is_imx1_uart(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX1_UART;
}

static inline int is_imx21_uart(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX21_UART;
}

static inline int is_imx6q_uart(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX6Q_UART;
}
/*
 * Save and restore functions for UCR1, UCR2 and UCR3 registers
 */
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_IMX_CONSOLE)
static void imx_port_ucrs_save(struct uart_port *port,
			       struct imx_port_ucrs *ucr)
{
	/* save control registers */
	ucr->ucr1 = readl(port->membase + UCR1);
	ucr->ucr2 = readl(port->membase + UCR2);
	ucr->ucr3 = readl(port->membase + UCR3);
}

static void imx_port_ucrs_restore(struct uart_port *port,
				  struct imx_port_ucrs *ucr)
{
	/* restore control registers */
	writel(ucr->ucr1, port->membase + UCR1);
	writel(ucr->ucr2, port->membase + UCR2);
	writel(ucr->ucr3, port->membase + UCR3);
}
#endif

/*
 * Handle any change of modem status signal since we were last called.
 * Updates the relevant icount fields, forwards DCD/CTS transitions to
 * the serial core, and wakes anyone blocked in TIOCMIWAIT.
 */
static void imx_mctrl_check(struct imx_port *sport)
{
	unsigned int status, changed;

	status = sport->port.ops->get_mctrl(&sport->port);
	changed = status ^ sport->old_status;

	if (changed == 0)
		return;

	sport->old_status = status;

	if (changed & TIOCM_RI)
		sport->port.icount.rng++;
	if (changed & TIOCM_DSR)
		sport->port.icount.dsr++;
	if (changed & TIOCM_CAR)
		uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
	if (changed & TIOCM_CTS)
		uart_handle_cts_change(&sport->port, status & TIOCM_CTS);

	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}

/*
 * This is our per-port timeout handler, for checking the
 * modem status signals.
 */
static void imx_timeout(unsigned long data)
{
	struct imx_port *sport = (struct imx_port *)data;
	unsigned long flags;

	/* Only poll while the port is actually open (state is set). */
	if (sport->port.state) {
		spin_lock_irqsave(&sport->port.lock, flags);
		imx_mctrl_check(sport);
		spin_unlock_irqrestore(&sport->port.lock, flags);

		/* re-arm for the next poll */
		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
	}
}

/*
 * Stop transmitting: mask the TX-empty interrupt.  For IrDA, first drain
 * the transmitter (half duplex) and switch back to receive mode.
 * interrupts disabled on entry
 */
static void imx_stop_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (USE_IRDA(sport)) {
		/* half duplex - wait for end of transmission */
		int n = 256;
		while ((--n > 0) &&
		      !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
			udelay(5);
			barrier();
		}

		/*
		 * irda transceiver - wait a bit more to avoid
		 * cutoff, hardware dependent
		 */
		udelay(sport->trcv_delay);

		/*
		 * half duplex - reactivate receive mode,
		 * flush receive pipe echo crap
		 */
		if (readl(sport->port.membase + USR2) & USR2_TXDC) {
			/* mask TX interrupts ... */
			temp = readl(sport->port.membase + UCR1);
			temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp &= ~(UCR4_TCEN);
			writel(temp, sport->port.membase + UCR4);

			/* ... drain any echoed characters from the RX FIFO ... */
			while (readl(sport->port.membase + URXD0) &
			       URXD_CHARRDY)
				barrier();

			/* ... and re-enable RX interrupts */
			temp = readl(sport->port.membase + UCR1);
			temp |= UCR1_RRDYEN;
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp |= UCR4_DREN;
			writel(temp, sport->port.membase + UCR4);
		}
		return;
	}

	/*
	 * We are maybe in the SMP context, so if the DMA TX thread is running
	 * on other cpu, we have to wait for it to finish.
	 */
	if (sport->dma_is_enabled && sport->dma_is_txing)
		return;

	temp = readl(sport->port.membase + UCR1);
	writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}

/*
 * Stop receiving: disable the receiver and its ready interrupt.
 * interrupts disabled on entry
 */
static void imx_stop_rx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (sport->dma_is_enabled && sport->dma_is_rxing) {
		if (sport->port.suspended) {
			/* going to suspend: kill the in-flight RX DMA now */
			dmaengine_terminate_all(sport->dma_chan_rx);
			sport->dma_is_rxing = 0;
		} else {
			/* let the DMA completion path finish on its own */
			return;
		}
	}

	temp = readl(sport->port.membase + UCR2);
	writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);

	/* disable the `Receiver Ready Interrupt` */
	temp = readl(sport->port.membase + UCR1);
	writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
}

/*
 * Set the modem control timer to fire immediately.
 */
static void imx_enable_ms(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	mod_timer(&sport->timer, jiffies);
}

/*
 * PIO transmit path: push characters from the circular buffer into the
 * TX FIFO until the buffer is empty or the FIFO is full.
 */
static inline void imx_transmit_buffer(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	if (sport->port.x_char) {
		/* Send next char */
		writel(sport->port.x_char, sport->port.membase + URTX0);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		imx_stop_tx(&sport->port);
		return;
	}

	while (!uart_circ_empty(xmit) &&
	       !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
		/* send xmit->buf[xmit->tail]
		 * out the port here */
		writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		imx_stop_tx(&sport->port);
}

/*
 * TX DMA completion callback: unmap the buffers, advance the circular
 * buffer tail past the bytes just sent, and wake writers (or a waiting
 * shutdown path).
 *
 * NOTE(review): dma_is_txing is cleared before taking port.lock while
 * imx_dma_tx()/imx_stop_tx() read it — looks racy; verify against the
 * DMA engine's completion ordering.
 */
static void dma_tx_callback(void *data)
{
	struct imx_port *sport = data;
	struct scatterlist *sgl = &sport->tx_sgl[0];
	struct circ_buf *xmit = &sport->port.state->xmit;
	unsigned long flags;

	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

	sport->dma_is_txing = 0;

	/* update the stat */
	spin_lock_irqsave(&sport->port.lock, flags);
	xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
	sport->port.icount.tx += sport->tx_bytes;
	spin_unlock_irqrestore(&sport->port.lock, flags);

	dev_dbg(sport->port.dev, "we finish the TX DMA.\n");

	uart_write_wakeup(&sport->port);

	if (waitqueue_active(&sport->dma_wait)) {
		wake_up(&sport->dma_wait);
		dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
		return;
	}
}

/*
 * Kick off a TX DMA transfer for everything currently pending in the
 * circular buffer.  If the buffer wraps, two SG entries are used.
 * No-op if a transfer is already in flight.
 */
static void imx_dma_tx(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct scatterlist *sgl = sport->tx_sgl;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan	*chan = sport->dma_chan_tx;
	struct device *dev = sport->port.dev;
	enum dma_status status;
	int ret;

	status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
	if (DMA_IN_PROGRESS == status)
		return;

	sport->tx_bytes = uart_circ_chars_pending(xmit);

	if (xmit->tail > xmit->head && xmit->head > 0) {
		/* data wraps around the end of the buffer: two segments */
		sport->dma_tx_nents = 2;
		sg_init_table(sgl, 2);
		sg_set_buf(sgl, xmit->buf + xmit->tail,
				UART_XMIT_SIZE - xmit->tail);
		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
	} else {
		sport->dma_tx_nents = 1;
		sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
	}

	ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
	if (ret == 0) {
		dev_err(dev, "DMA mapping error for TX.\n");
		return;
	}
	/*
	 * NOTE(review): on prep failure below the SG list stays mapped —
	 * a dma_unmap_sg() in that path looks missing; confirm.
	 */
	desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "We cannot prepare for the TX slave dma!\n");
		return;
	}
	desc->callback = dma_tx_callback;
	desc->callback_param = sport;

	dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
		uart_circ_chars_pending(xmit));
	/* fire it */
	sport->dma_is_txing = 1;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return;
}

/*
 * Start transmitting: enable TX interrupts (or start a DMA transfer),
 * after clearing any stale overrun flag and enabling overrun reporting.
 * interrupts disabled on entry
 */
static void imx_start_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (USE_IRDA(sport)) {
		/* half duplex in IrDA mode; have to disable receive mode */
		temp = readl(sport->port.membase + UCR4);
		temp &= ~(UCR4_DREN);
		writel(temp, sport->port.membase + UCR4);

		temp = readl(sport->port.membase + UCR1);
		temp &= ~(UCR1_RRDYEN);
		writel(temp, sport->port.membase + UCR1);
	}
	/*
	 * Clear any pending ORE flag before enabling interrupt.
	 * NOTE(review): USR2 is write-1-to-clear; writing back the whole
	 * read value (temp | USR2_ORE) also clears every other pending
	 * w1c status bit, not just ORE.  Writing only USR2_ORE looks like
	 * the intended operation — confirm against the i.MX reference
	 * manual before changing.
	 */
	temp = readl(sport->port.membase + USR2);
	writel(temp | USR2_ORE, sport->port.membase + USR2);

	temp = readl(sport->port.membase + UCR4);
	temp |= UCR4_OREN;
	writel(temp, sport->port.membase + UCR4);

	if (!sport->dma_is_enabled) {
		temp = readl(sport->port.membase + UCR1);
		writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
	}

	if (USE_IRDA(sport)) {
		temp = readl(sport->port.membase + UCR1);
		temp |= UCR1_TRDYEN;
		writel(temp, sport->port.membase + UCR1);

		temp = readl(sport->port.membase + UCR4);
		temp |= UCR4_TCEN;
		writel(temp, sport->port.membase + UCR4);
	}

	if (sport->dma_is_enabled) {
		/* FIXME: port->x_char must be transmitted if != 0 */
		if (!uart_circ_empty(&port->state->xmit) &&
		    !uart_tx_stopped(port))
			imx_dma_tx(sport);
		return;
	}

	if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
		imx_transmit_buffer(sport);
}

/* RTS-delta interrupt: report the new CTS state to the serial core. */
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int val;
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);

	/* ack the delta, then sample the current RTS pin state */
	writel(USR1_RTSD, sport->port.membase + USR1);
	val = readl(sport->port.membase + USR1) & USR1_RTSS;
	uart_handle_cts_change(&sport->port, !!val);
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);

	spin_unlock_irqrestore(&sport->port.lock, flags);
	return IRQ_HANDLED;
}

/* TX interrupt: refill the TX FIFO from the circular buffer. */
static irqreturn_t imx_txint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	imx_transmit_buffer(sport);
	spin_unlock_irqrestore(&sport->port.lock, flags);
	return IRQ_HANDLED;
}

/*
 * RX interrupt (PIO path): drain the RX FIFO into the tty flip buffer,
 * translating per-character error bits into tty flags and updating the
 * error counters.
 */
static irqreturn_t imx_rxint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int rx, flg, ignored = 0;
	struct tty_port *port = &sport->port.state->port;
	unsigned long flags, temp;

	spin_lock_irqsave(&sport->port.lock, flags);

	while (readl(sport->port.membase + USR2) & USR2_RDR) {
		flg = TTY_NORMAL;
		sport->port.icount.rx++;

		rx = readl(sport->port.membase + URXD0);

		temp = readl(sport->port.membase + USR2);
		if (temp & USR2_BRCD) {
			/* ack the break condition (w1c) */
			writel(USR2_BRCD, sport->port.membase + USR2);
			if (uart_handle_break(&sport->port))
				continue;
		}

		if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
			continue;

		if (unlikely(rx & URXD_ERR)) {
			if (rx & URXD_BRK)
				sport->port.icount.brk++;
			else if (rx & URXD_PRERR)
				sport->port.icount.parity++;
			else if (rx & URXD_FRMERR)
				sport->port.icount.frame++;
			/* overrun may accompany another error: not else-if */
			if (rx & URXD_OVRRUN)
				sport->port.icount.overrun++;

			if (rx & sport->port.ignore_status_mask) {
				/* bail out if we are flooded with ignores */
				if (++ignored > 100)
					goto out;
				continue;
			}

			rx &= sport->port.read_status_mask;

			if (rx & URXD_BRK)
				flg = TTY_BREAK;
			else if (rx & URXD_PRERR)
				flg = TTY_PARITY;
			else if (rx & URXD_FRMERR)
				flg = TTY_FRAME;
			if (rx & URXD_OVRRUN)
				flg = TTY_OVERRUN;

#ifdef SUPPORT_SYSRQ
			sport->port.sysrq = 0;
#endif
		}

		tty_insert_flip_char(port, rx, flg);
	}

out:
	spin_unlock_irqrestore(&sport->port.lock, flags);
	tty_flip_buffer_push(port);
	return IRQ_HANDLED;
}

static int start_rx_dma(struct imx_port *sport);
/*
 * If the RXFIFO is filled with some data, and then we
 * arise a DMA operation to receive them.
 */
static void imx_dma_rxint(struct imx_port *sport)
{
	unsigned long temp;

	temp = readl(sport->port.membase + USR2);
	if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
		sport->dma_is_rxing = 1;

		/* disable the `Receiver Ready Interrupt` */
		temp = readl(sport->port.membase + UCR1);
		temp &= ~(UCR1_RRDYEN);
		writel(temp, sport->port.membase + UCR1);

		/* tell the DMA to receive the data. */
		start_rx_dma(sport);
	}
}

/*
 * Combined interrupt handler for non-i.mx1 parts (single IRQ line):
 * dispatch RX/TX/RTS/awake/overrun conditions to their handlers.
 */
static irqreturn_t imx_int(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int sts;
	unsigned int sts2;

	sts = readl(sport->port.membase + USR1);

	if (sts & USR1_RRDY) {
		if (sport->dma_is_enabled)
			imx_dma_rxint(sport);
		else
			imx_rxint(irq, dev_id);
	}

	if (sts & USR1_TRDY &&
			readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
		imx_txint(irq, dev_id);

	if (sts & USR1_RTSD)
		imx_rtsint(irq, dev_id);

	if (sts & USR1_AWAKE)
		writel(USR1_AWAKE, sport->port.membase + USR1);

	sts2 = readl(sport->port.membase + USR2);
	if (sts2 & USR2_ORE) {
		dev_err(sport->port.dev, "Rx FIFO overrun\n");
		sport->port.icount.overrun++;
		/*
		 * NOTE(review): writing back sts2 | USR2_ORE clears every
		 * pending w1c bit in USR2, not just ORE; writing only
		 * USR2_ORE looks like the intended ack — confirm.
		 */
		writel(sts2 | USR2_ORE, sport->port.membase + USR2);
	}

	return IRQ_HANDLED;
}

/*
 * Return TIOCSER_TEMT when transmitter is not busy.
782 */ 783 static unsigned int imx_tx_empty(struct uart_port *port) 784 { 785 struct imx_port *sport = (struct imx_port *)port; 786 unsigned int ret; 787 788 ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0; 789 790 /* If the TX DMA is working, return 0. */ 791 if (sport->dma_is_enabled && sport->dma_is_txing) 792 ret = 0; 793 794 return ret; 795 } 796 797 /* 798 * We have a modem side uart, so the meanings of RTS and CTS are inverted. 799 */ 800 static unsigned int imx_get_mctrl(struct uart_port *port) 801 { 802 struct imx_port *sport = (struct imx_port *)port; 803 unsigned int tmp = TIOCM_DSR | TIOCM_CAR; 804 805 if (readl(sport->port.membase + USR1) & USR1_RTSS) 806 tmp |= TIOCM_CTS; 807 808 if (readl(sport->port.membase + UCR2) & UCR2_CTS) 809 tmp |= TIOCM_RTS; 810 811 if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP) 812 tmp |= TIOCM_LOOP; 813 814 return tmp; 815 } 816 817 static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl) 818 { 819 struct imx_port *sport = (struct imx_port *)port; 820 unsigned long temp; 821 822 temp = readl(sport->port.membase + UCR2) & ~(UCR2_CTS | UCR2_CTSC); 823 if (mctrl & TIOCM_RTS) 824 temp |= UCR2_CTS | UCR2_CTSC; 825 826 writel(temp, sport->port.membase + UCR2); 827 828 temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP; 829 if (mctrl & TIOCM_LOOP) 830 temp |= UTS_LOOP; 831 writel(temp, sport->port.membase + uts_reg(sport)); 832 } 833 834 /* 835 * Interrupts always disabled. 
836 */ 837 static void imx_break_ctl(struct uart_port *port, int break_state) 838 { 839 struct imx_port *sport = (struct imx_port *)port; 840 unsigned long flags, temp; 841 842 spin_lock_irqsave(&sport->port.lock, flags); 843 844 temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK; 845 846 if (break_state != 0) 847 temp |= UCR1_SNDBRK; 848 849 writel(temp, sport->port.membase + UCR1); 850 851 spin_unlock_irqrestore(&sport->port.lock, flags); 852 } 853 854 #define TXTL 2 /* reset default */ 855 #define RXTL 1 /* reset default */ 856 857 static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode) 858 { 859 unsigned int val; 860 861 /* set receiver / transmitter trigger level */ 862 val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE); 863 val |= TXTL << UFCR_TXTL_SHF | RXTL; 864 writel(val, sport->port.membase + UFCR); 865 return 0; 866 } 867 868 #define RX_BUF_SIZE (PAGE_SIZE) 869 static void imx_rx_dma_done(struct imx_port *sport) 870 { 871 unsigned long temp; 872 873 /* Enable this interrupt when the RXFIFO is empty. */ 874 temp = readl(sport->port.membase + UCR1); 875 temp |= UCR1_RRDYEN; 876 writel(temp, sport->port.membase + UCR1); 877 878 sport->dma_is_rxing = 0; 879 880 /* Is the shutdown waiting for us? */ 881 if (waitqueue_active(&sport->dma_wait)) 882 wake_up(&sport->dma_wait); 883 } 884 885 /* 886 * There are three kinds of RX DMA interrupts(such as in the MX6Q): 887 * [1] the RX DMA buffer is full. 888 * [2] the Aging timer expires(wait for 8 bytes long) 889 * [3] the Idle Condition Detect(enabled the UCR4_IDDMAEN). 890 * 891 * The [2] is trigger when a character was been sitting in the FIFO 892 * meanwhile [3] can wait for 32 bytes long when the RX line is 893 * on IDLE state and RxFIFO is empty. 
 */
static void dma_rx_callback(void *data)
{
	struct imx_port *sport = data;
	struct dma_chan	*chan = sport->dma_chan_rx;
	struct scatterlist *sgl = &sport->rx_sgl;
	struct tty_port *port = &sport->port.state->port;
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int count;

	/* unmap it first */
	dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);

	/* residue tells us how much of the buffer was actually filled */
	status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
	count = RX_BUF_SIZE - state.residue;
	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);

	if (count) {
		tty_insert_flip_string(port, sport->rx_buf, count);
		tty_flip_buffer_push(port);

		/* keep receiving: queue the next DMA transfer */
		start_rx_dma(sport);
	} else
		imx_rx_dma_done(sport);
}

/*
 * Map the RX bounce buffer and submit a one-segment slave DMA transfer
 * from the UART RX FIFO into it.  Returns 0 or -EINVAL on map/prep
 * failure.
 */
static int start_rx_dma(struct imx_port *sport)
{
	struct scatterlist *sgl = &sport->rx_sgl;
	struct dma_chan	*chan = sport->dma_chan_rx;
	struct device *dev = sport->port.dev;
	struct dma_async_tx_descriptor *desc;
	int ret;

	sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
	ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
	if (ret == 0) {
		dev_err(dev, "DMA mapping error for RX.\n");
		return -EINVAL;
	}
	desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT);
	if (!desc) {
		/* NOTE(review): sgl stays mapped on this path — verify */
		dev_err(dev, "We cannot prepare for the RX slave dma!\n");
		return -EINVAL;
	}
	desc->callback = dma_rx_callback;
	desc->callback_param = sport;

	dev_dbg(dev, "RX: prepare for the DMA.\n");
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

/* Release DMA channels and the RX bounce buffer; safe to call on error. */
static void imx_uart_dma_exit(struct imx_port *sport)
{
	if (sport->dma_chan_rx) {
		dma_release_channel(sport->dma_chan_rx);
		sport->dma_chan_rx = NULL;

		kfree(sport->rx_buf);
		sport->rx_buf = NULL;
	}

	if (sport->dma_chan_tx) {
		dma_release_channel(sport->dma_chan_tx);
		sport->dma_chan_tx = NULL;
	}

	sport->dma_is_inited = 0;
}

/*
 * Request and configure the RX and TX slave DMA channels and allocate
 * the RX bounce buffer.  On any failure everything acquired so far is
 * torn down via imx_uart_dma_exit().
 */
static int imx_uart_dma_init(struct imx_port *sport)
{
	struct dma_slave_config slave_config = {};
	struct device *dev = sport->port.dev;
	int ret;

	/* Prepare for RX : */
	sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
	if (!sport->dma_chan_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = sport->port.mapbase + URXD0;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = RXTL;
	ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	/* bounce buffer filled by the RX DMA; sized to RX_BUF_SIZE */
	sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sport->rx_buf) {
		dev_err(dev, "cannot alloc DMA buffer.\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Prepare for TX : */
	sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
	if (!sport->dma_chan_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = sport->port.mapbase + URTX0;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = TXTL;
	ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.");
		goto err;
	}

	sport->dma_is_inited = 1;

	return 0;
err:
	imx_uart_dma_exit(sport);
	return ret;
}

/* Switch the hardware to DMA mode: enable RX/TX/aging-timer DMA requests. */
static void imx_enable_dma(struct imx_port *sport)
{
	unsigned long temp;

	init_waitqueue_head(&sport->dma_wait);

	/* set UCR1 */
	temp = readl(sport->port.membase + UCR1);
	temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
		/* wait for 32 idle frames for IDDMA interrupt */
		UCR1_ICD_REG(3);
	writel(temp, sport->port.membase + UCR1);

	/* set UCR4 */
	temp = readl(sport->port.membase + UCR4);
	temp |= UCR4_IDDMAEN;
	writel(temp, sport->port.membase + UCR4);

	sport->dma_is_enabled = 1;
}

/* Undo imx_enable_dma(): clear the DMA request and idle-detect bits. */
static void imx_disable_dma(struct imx_port *sport)
{
	unsigned long temp;

	/* clear UCR1 */
	temp = readl(sport->port.membase + UCR1);
	temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
	writel(temp, sport->port.membase + UCR1);

	/* clear UCR2 */
	temp = readl(sport->port.membase + UCR2);
	temp &= ~(UCR2_CTSC | UCR2_CTS);
	writel(temp, sport->port.membase + UCR2);

	/* clear UCR4 */
	temp = readl(sport->port.membase + UCR4);
	temp &= ~UCR4_IDDMAEN;
	writel(temp, sport->port.membase + UCR4);

	sport->dma_is_enabled = 0;
}

/* half the RX buffer size */
#define CTSTL 16

/*
 * Open-time initialization: enable clocks, reset the UART, request the
 * IRQ(s) and enable receiver/transmitter and their interrupts.
 */
static int imx_startup(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	int retval, i;
	unsigned long flags, temp;

	retval = clk_prepare_enable(sport->clk_per);
	if (retval)
		goto error_out1;
	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval) {
		clk_disable_unprepare(sport->clk_per);
		goto error_out1;
	}

	imx_setup_ufcr(sport, 0);

	/* disable the DREN bit (Data Ready interrupt enable) before
	 * requesting IRQs
	 */
	temp = readl(sport->port.membase + UCR4);

	if (USE_IRDA(sport))
		temp |= UCR4_IRSC;

	/* set the trigger level for CTS */
	temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
	temp |= CTSTL << UCR4_CTSTL_SHF;

	writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);

	/* Reset fifo's and state machines */
	i = 100;

	temp = readl(sport->port.membase + UCR2);
	temp &= ~UCR2_SRST;
	writel(temp, sport->port.membase + UCR2);

	/* wait (bounded) for the soft reset to complete */
	while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
		udelay(1);

	/*
	 * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
	 * chips only have one interrupt.
	 */
	if (sport->txirq > 0) {
		retval = request_irq(sport->rxirq, imx_rxint, 0,
				     dev_name(port->dev), sport);
		if (retval)
			goto error_out1;

		retval = request_irq(sport->txirq, imx_txint, 0,
				     dev_name(port->dev), sport);
		if (retval)
			goto error_out2;

		/* do not use RTS IRQ on IrDA */
		if (!USE_IRDA(sport)) {
			retval = request_irq(sport->rtsirq, imx_rtsint, 0,
					     dev_name(port->dev), sport);
			if (retval)
				goto error_out3;
		}
	} else {
		retval = request_irq(sport->port.irq, imx_int, 0,
				     dev_name(port->dev), sport);
		if (retval) {
			/*
			 * NOTE(review): request_irq() failed, so the IRQ
			 * was never allocated — this free_irq() looks
			 * wrong and should probably be dropped; confirm.
			 */
			free_irq(sport->port.irq, sport);
			goto error_out1;
		}
	}

	spin_lock_irqsave(&sport->port.lock, flags);
	/*
	 * Finally, clear and enable interrupts
	 */
	writel(USR1_RTSD, sport->port.membase + USR1);

	temp = readl(sport->port.membase + UCR1);
	temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;

	if (USE_IRDA(sport)) {
		temp |= UCR1_IREN;
		temp &= ~(UCR1_RTSDEN);
	}

	writel(temp, sport->port.membase + UCR1);

	temp = readl(sport->port.membase + UCR2);
	temp |= (UCR2_RXEN | UCR2_TXEN);
	if (!sport->have_rtscts)
		temp |= UCR2_IRTS;
	writel(temp, sport->port.membase + UCR2);

	if (!is_imx1_uart(sport)) {
		temp = readl(sport->port.membase + UCR3);
		temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
		writel(temp, sport->port.membase + UCR3);
	}

	if (USE_IRDA(sport)) {
		temp = readl(sport->port.membase + UCR4);
		if (sport->irda_inv_rx)
			temp |= UCR4_INVR;
		else
			temp &= ~(UCR4_INVR);
		writel(temp | UCR4_DREN, sport->port.membase + UCR4);

		temp = readl(sport->port.membase + UCR3);
		if
(sport->irda_inv_tx) 1181 temp |= UCR3_INVT; 1182 else 1183 temp &= ~(UCR3_INVT); 1184 writel(temp, sport->port.membase + UCR3); 1185 } 1186 1187 /* 1188 * Enable modem status interrupts 1189 */ 1190 imx_enable_ms(&sport->port); 1191 spin_unlock_irqrestore(&sport->port.lock, flags); 1192 1193 if (USE_IRDA(sport)) { 1194 struct imxuart_platform_data *pdata; 1195 pdata = dev_get_platdata(sport->port.dev); 1196 sport->irda_inv_rx = pdata->irda_inv_rx; 1197 sport->irda_inv_tx = pdata->irda_inv_tx; 1198 sport->trcv_delay = pdata->transceiver_delay; 1199 if (pdata->irda_enable) 1200 pdata->irda_enable(1); 1201 } 1202 1203 return 0; 1204 1205 error_out3: 1206 if (sport->txirq) 1207 free_irq(sport->txirq, sport); 1208 error_out2: 1209 if (sport->rxirq) 1210 free_irq(sport->rxirq, sport); 1211 error_out1: 1212 return retval; 1213 } 1214 1215 static void imx_shutdown(struct uart_port *port) 1216 { 1217 struct imx_port *sport = (struct imx_port *)port; 1218 unsigned long temp; 1219 unsigned long flags; 1220 1221 if (sport->dma_is_enabled) { 1222 int ret; 1223 1224 /* We have to wait for the DMA to finish. */ 1225 ret = wait_event_interruptible(sport->dma_wait, 1226 !sport->dma_is_rxing && !sport->dma_is_txing); 1227 if (ret != 0) { 1228 sport->dma_is_rxing = 0; 1229 sport->dma_is_txing = 0; 1230 dmaengine_terminate_all(sport->dma_chan_tx); 1231 dmaengine_terminate_all(sport->dma_chan_rx); 1232 } 1233 imx_stop_tx(port); 1234 imx_stop_rx(port); 1235 imx_disable_dma(sport); 1236 imx_uart_dma_exit(sport); 1237 } 1238 1239 spin_lock_irqsave(&sport->port.lock, flags); 1240 temp = readl(sport->port.membase + UCR2); 1241 temp &= ~(UCR2_TXEN); 1242 writel(temp, sport->port.membase + UCR2); 1243 spin_unlock_irqrestore(&sport->port.lock, flags); 1244 1245 if (USE_IRDA(sport)) { 1246 struct imxuart_platform_data *pdata; 1247 pdata = dev_get_platdata(sport->port.dev); 1248 if (pdata->irda_enable) 1249 pdata->irda_enable(0); 1250 } 1251 1252 /* 1253 * Stop our timer. 
 */
	del_timer_sync(&sport->timer);

	/*
	 * Free the interrupts
	 */
	if (sport->txirq > 0) {
		/* three-IRQ (i.MX1-style) setup; RTS IRQ not used on IrDA */
		if (!USE_IRDA(sport))
			free_irq(sport->rtsirq, sport);
		free_irq(sport->txirq, sport);
		free_irq(sport->rxirq, sport);
	} else
		free_irq(sport->port.irq, sport);

	/*
	 * Disable all interrupts, port and break condition.
	 */

	spin_lock_irqsave(&sport->port.lock, flags);
	temp = readl(sport->port.membase + UCR1);
	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
	if (USE_IRDA(sport))
		temp &= ~(UCR1_IREN);

	writel(temp, sport->port.membase + UCR1);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	clk_disable_unprepare(sport->clk_per);
	clk_disable_unprepare(sport->clk_ipg);
}

/*
 * uart_ops .flush_buffer hook: when TX runs over DMA, drop the pending
 * byte count and abort the in-flight TX descriptor.
 */
static void imx_flush_buffer(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (sport->dma_is_enabled) {
		sport->tx_bytes = 0;
		dmaengine_terminate_all(sport->dma_chan_tx);
	}
}

/*
 * uart_ops .set_termios hook: translate termios settings into UCR/UBIR/
 * UBMR register values.  Drains the transmitter, disables RX/TX while
 * reprogramming, computes the rational baud divider, then restores the
 * enables (continued below).
 */
static void
imx_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
	unsigned int div, ufcr;
	unsigned long num, denom;
	uint64_t tdiv64;

	/*
	 * If we don't support modem control lines, don't allow
	 * these to be set.
	 */
	/* NOTE(review): deliberately dead (if (0)) -- kept from upstream */
	if (0) {
		termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
		termios->c_cflag |= CLOCAL;
	}

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
	else
		ucr2 = UCR2_SRST | UCR2_IRTS;

	if (termios->c_cflag & CRTSCTS) {
		if (sport->have_rtscts) {
			ucr2 &= ~UCR2_IRTS;
			ucr2 |= UCR2_CTSC;

			/* Can we enable the DMA support? */
			if (is_imx6q_uart(sport) && !uart_console(port)
				&& !sport->dma_is_inited)
				imx_uart_dma_init(sport);
		} else {
			/* no RTS/CTS wired up: refuse hardware flow control */
			termios->c_cflag &= ~CRTSCTS;
		}
	}

	if (termios->c_cflag & CSTOPB)
		ucr2 |= UCR2_STPB;
	if (termios->c_cflag & PARENB) {
		ucr2 |= UCR2_PREN;
		if (termios->c_cflag & PARODD)
			ucr2 |= UCR2_PROE;
	}

	del_timer_sync(&sport->timer);

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	sport->port.read_status_mask = 0;
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |= URXD_BRK;

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |= URXD_PRERR;
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |= URXD_BRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |= URXD_OVRRUN;
	}

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_ucr1 = readl(sport->port.membase + UCR1);
	writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
			sport->port.membase + UCR1);

	/* busy-wait until the transmit machine is completely idle */
	while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
		barrier();

	/* then, disable everything */
	old_txrxen = readl(sport->port.membase + UCR2);
	writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
			sport->port.membase + UCR2);
	/* remember only the enable bits so we can restore them below */
	old_txrxen &= (UCR2_TXEN | UCR2_RXEN);

	if (USE_IRDA(sport)) {
		/*
		 * use maximum available submodule frequency to
		 * avoid missing short pulses due to low sampling rate
		 */
		div = 1;
	} else {
		/* custom-baudrate handling */
		div = sport->port.uartclk / (baud * 16);
		if (baud == 38400 && quot != div)
			baud = sport->port.uartclk / (quot * 16);

		div = sport->port.uartclk / (baud * 16);
		if (div > 7)
			div = 7;
		if (!div)
			div = 1;
	}

	/* best num/denom for UBIR/UBMR within the 16-bit register range */
	rational_best_approximation(16 * div * baud, sport->port.uartclk,
		1 << 16, 1 << 16, &num, &denom);

	/* report the actually-achieved baud rate back into termios */
	tdiv64 = sport->port.uartclk;
	tdiv64 *= num;
	do_div(tdiv64, denom * 16 * div);
	tty_termios_encode_baud_rate(termios,
				(speed_t)tdiv64, (speed_t)tdiv64);

	/* hardware expects (value - 1) in UBIR/UBMR */
	num -= 1;
	denom -= 1;

	ufcr = readl(sport->port.membase + UFCR);
	ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
	if (sport->dte_mode)
		ufcr |= UFCR_DCEDTE;
	writel(ufcr, sport->port.membase + UFCR);

	writel(num, sport->port.membase + UBIR);
	writel(denom, sport->port.membase + UBMR);

	if (!is_imx1_uart(sport))
		writel(sport->port.uartclk / div / 1000,
			sport->port.membase + IMX21_ONEMS);

	writel(old_ucr1, sport->port.membase + UCR1);

	/* set the parity, stop bits and data size */
	writel(ucr2 | old_txrxen, sport->port.membase +
	UCR2);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		imx_enable_ms(&sport->port);

	/* DMA channels were set up earlier (CRTSCTS path); arm them now */
	if (sport->dma_is_inited && !sport->dma_is_enabled)
		imx_enable_dma(sport);
	spin_unlock_irqrestore(&sport->port.lock, flags);
}

/*
 * uart_ops .type hook: human-readable port type for /proc and dmesg.
 */
static const char *imx_type(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	return sport->port.type == PORT_IMX ? "IMX" : NULL;
}

/*
 * Configure/autoconfigure the port.
 */
static void imx_config_port(struct uart_port *port, int flags)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (flags & UART_CONFIG_TYPE)
		sport->port.type = PORT_IMX;
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 * The only change we allow are to the flags and type, and
 * even then only between PORT_IMX and PORT_UNKNOWN
 */
static int
imx_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct imx_port *sport = (struct imx_port *)port;
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
		ret = -EINVAL;
	if (sport->port.irq != ser->irq)
		ret = -EINVAL;
	if (ser->io_type != UPIO_MEM)
		ret = -EINVAL;
	if (sport->port.uartclk / 16 != ser->baud_base)
		ret = -EINVAL;
	if (sport->port.mapbase != (unsigned long)ser->iomem_base)
		ret = -EINVAL;
	if (sport->port.iobase != ser->port)
		ret = -EINVAL;
	if (ser->hub6 != 0)
		ret = -EINVAL;
	return ret;
}

#if defined(CONFIG_CONSOLE_POLL)
/*
 * kgdb/kdb polled-mode RX: non-blocking read of one character, or
 * NO_POLL_CHAR if the receiver FIFO is empty.
 */
static int imx_poll_get_char(struct uart_port *port)
{
	if (!(readl(port->membase + USR2) & USR2_RDR))
		return NO_POLL_CHAR;

	return readl(port->membase + URXD0) & URXD_RX_DATA;
}

/*
 * kgdb/kdb polled-mode TX: synchronously emit one character with all
 * UART interrupts masked, then restore the control registers.
 */
static void imx_poll_put_char(struct uart_port *port, unsigned char c)
{
	struct imx_port_ucrs old_ucr;
	unsigned int status;

	/* save control registers */
	imx_port_ucrs_save(port, &old_ucr);

	/* disable interrupts */
	writel(UCR1_UARTEN, port->membase + UCR1);
	writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
			port->membase + UCR2);
	writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
			port->membase + UCR3);

	/* drain */
	do {
		status = readl(port->membase + USR1);
	} while (~status & USR1_TRDY);

	/* write */
	writel(c, port->membase + URTX0);

	/* flush */
	do {
		status = readl(port->membase + USR2);
	} while (~status & USR2_TXDC);

	/* restore control registers */
	imx_port_ucrs_restore(port, &old_ucr);
}
#endif

/* serial_core operations table for this driver */
static struct uart_ops imx_pops = {
	.tx_empty	= imx_tx_empty,
	.set_mctrl	= imx_set_mctrl,
	.get_mctrl	= imx_get_mctrl,
	.stop_tx	= imx_stop_tx,
	.start_tx	= imx_start_tx,
	.stop_rx	= imx_stop_rx,
	.enable_ms	= imx_enable_ms,
	.break_ctl	= imx_break_ctl,
	.startup	= imx_startup,
	.shutdown	= imx_shutdown,
	.flush_buffer	= imx_flush_buffer,
	.set_termios	= imx_set_termios,
	.type		= imx_type,
	.config_port	= imx_config_port,
	.verify_port	= imx_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_get_char  = imx_poll_get_char,
	.poll_put_char  = imx_poll_put_char,
#endif
};

/* registered ports, indexed by port line (serial alias id) */
static struct imx_port *imx_ports[UART_NR];

#ifdef CONFIG_SERIAL_IMX_CONSOLE
/* busy-wait for FIFO space, then push one console character */
static void imx_console_putchar(struct uart_port *port, int ch)
{
	struct imx_port *sport = (struct imx_port *)port;

	while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
		barrier();

	writel(ch, sport->port.membase + URTX0);
}

/*
 * Interrupts are disabled on entering
 */
static void
imx_console_write(struct console *co, const char *s, unsigned int count)
{
	struct imx_port *sport = imx_ports[co->index];
	struct
	imx_port_ucrs old_ucr;
	unsigned int ucr1;
	unsigned long flags = 0;
	int locked = 1;
	int retval;

	retval = clk_enable(sport->clk_per);
	if (retval)
		return;
	retval = clk_enable(sport->clk_ipg);
	if (retval) {
		clk_disable(sport->clk_per);
		return;
	}

	/*
	 * Take the port lock unless we'd deadlock: skip it when printing
	 * from sysrq context, and only try-lock during an oops.
	 */
	if (sport->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock_irqsave(&sport->port.lock, flags);
	else
		spin_lock_irqsave(&sport->port.lock, flags);

	/*
	 * First, save UCR1/2/3 and then disable interrupts
	 */
	imx_port_ucrs_save(&sport->port, &old_ucr);
	ucr1 = old_ucr.ucr1;

	if (is_imx1_uart(sport))
		ucr1 |= IMX1_UCR1_UARTCLKEN;
	ucr1 |= UCR1_UARTEN;
	ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);

	writel(ucr1, sport->port.membase + UCR1);

	writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);

	uart_console_write(&sport->port, s, count, imx_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore UCR1/2/3
	 */
	while (!(readl(sport->port.membase + USR2) & USR2_TXDC));

	imx_port_ucrs_restore(&sport->port, &old_ucr);

	if (locked)
		spin_unlock_irqrestore(&sport->port.lock, flags);

	clk_disable(sport->clk_ipg);
	clk_disable(sport->clk_per);
}

/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 */
static void __init
imx_console_get_options(struct imx_port *sport, int *baud,
			int *parity, int *bits)
{

	if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
		/* ok, the port was enabled */
		unsigned int ucr2, ubir, ubmr, uartclk;
		unsigned int baud_raw;
		unsigned int ucfr_rfdiv;

		ucr2 = readl(sport->port.membase + UCR2);

		*parity = 'n';
		if (ucr2 & UCR2_PREN) {
			if (ucr2 & UCR2_PROE)
				*parity = 'o';
			else
				*parity = 'e';
		}

		if (ucr2 & UCR2_WS)
			*bits = 8;
		else
			*bits = 7;

		ubir = readl(sport->port.membase + UBIR) & 0xffff;
		ubmr = readl(sport->port.membase + UBMR) & 0xffff;

		/* decode the reference-clock divider from UFCR bits 9:7 */
		ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
		if (ucfr_rfdiv == 6)
			ucfr_rfdiv = 7;
		else
			ucfr_rfdiv = 6 - ucfr_rfdiv;

		uartclk = clk_get_rate(sport->clk_per);
		uartclk /= ucfr_rfdiv;

		{	/*
			 * The next code provides exact computation of
			 *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
			 * without need of float support or long long division,
			 * which would be required to prevent 32bit arithmetic overflow
			 */
			unsigned int mul = ubir + 1;
			unsigned int div = 16 * (ubmr + 1);
			unsigned int rem = uartclk % div;

			baud_raw = (uartclk / div) * mul;
			baud_raw += (rem * mul + div / 2) / div;
			/* snap to the nearest multiple of 100 baud */
			*baud = (baud_raw + 50) / 100 * 100;
		}

		if (*baud != baud_raw)
			pr_info("Console IMX rounded baud rate from %d to %d\n",
				baud_raw, *baud);
	}
}

/*
 * console .setup hook: pick the port, parse or probe the line settings,
 * and hand them to the serial core.  Leaves clk_ipg prepared (console
 * writes only clk_enable/clk_disable) on success.
 */
static int __init
imx_console_setup(struct console *co, char *options)
{
	struct imx_port *sport;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int retval;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
		co->index = 0;
	sport = imx_ports[co->index];
	if (sport == NULL)
		return -ENODEV;

	/* For setting the registers, we only need to enable the ipg clock. */
	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval)
		goto error_console;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		imx_console_get_options(sport, &baud, &parity, &bits);

	imx_setup_ufcr(sport, 0);

	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);

	clk_disable(sport->clk_ipg);
	if (retval) {
		clk_unprepare(sport->clk_ipg);
		goto error_console;
	}

	retval = clk_prepare(sport->clk_per);
	if (retval)
		clk_disable_unprepare(sport->clk_ipg);

error_console:
	return retval;
}

static struct uart_driver imx_reg;
static struct console imx_console = {
	.name		= DEV_NAME,
	.write		= imx_console_write,
	.device		= uart_console_device,
	.setup		= imx_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &imx_reg,
};

#define IMX_CONSOLE	&imx_console
#else
#define IMX_CONSOLE	NULL
#endif

static struct uart_driver imx_reg = {
	.owner          = THIS_MODULE,
	.driver_name    = DRIVER_NAME,
	.dev_name       = DEV_NAME,
	.major          = SERIAL_IMX_MAJOR,
	.minor          = MINOR_START,
	.nr             = ARRAY_SIZE(imx_ports),
	.cons           = IMX_CONSOLE,
};

/*
 * Legacy platform-driver suspend: arm the asynchronous wake (AWAKEN)
 * bit so the UART can wake the SoC, then suspend the port.
 * NOTE(review): the register access assumes the ipg clock is still
 * running at this point -- confirm against the clock framework state.
 */
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
	struct imx_port *sport = platform_get_drvdata(dev);
	unsigned int val;

	/* enable wakeup from i.MX UART */
	val = readl(sport->port.membase + UCR3);
	val |= UCR3_AWAKEN;
	writel(val, sport->port.membase + UCR3);

	uart_suspend_port(&imx_reg, &sport->port);

	return 0;
}
static int serial_imx_resume(struct platform_device *dev) 1801 { 1802 struct imx_port *sport = platform_get_drvdata(dev); 1803 unsigned int val; 1804 1805 /* disable wakeup from i.MX UART */ 1806 val = readl(sport->port.membase + UCR3); 1807 val &= ~UCR3_AWAKEN; 1808 writel(val, sport->port.membase + UCR3); 1809 1810 uart_resume_port(&imx_reg, &sport->port); 1811 1812 return 0; 1813 } 1814 1815 #ifdef CONFIG_OF 1816 /* 1817 * This function returns 1 iff pdev isn't a device instatiated by dt, 0 iff it 1818 * could successfully get all information from dt or a negative errno. 1819 */ 1820 static int serial_imx_probe_dt(struct imx_port *sport, 1821 struct platform_device *pdev) 1822 { 1823 struct device_node *np = pdev->dev.of_node; 1824 const struct of_device_id *of_id = 1825 of_match_device(imx_uart_dt_ids, &pdev->dev); 1826 int ret; 1827 1828 if (!np) 1829 /* no device tree device */ 1830 return 1; 1831 1832 ret = of_alias_get_id(np, "serial"); 1833 if (ret < 0) { 1834 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); 1835 return ret; 1836 } 1837 sport->port.line = ret; 1838 1839 if (of_get_property(np, "fsl,uart-has-rtscts", NULL)) 1840 sport->have_rtscts = 1; 1841 1842 if (of_get_property(np, "fsl,irda-mode", NULL)) 1843 sport->use_irda = 1; 1844 1845 if (of_get_property(np, "fsl,dte-mode", NULL)) 1846 sport->dte_mode = 1; 1847 1848 sport->devdata = of_id->data; 1849 1850 return 0; 1851 } 1852 #else 1853 static inline int serial_imx_probe_dt(struct imx_port *sport, 1854 struct platform_device *pdev) 1855 { 1856 return 1; 1857 } 1858 #endif 1859 1860 static void serial_imx_probe_pdata(struct imx_port *sport, 1861 struct platform_device *pdev) 1862 { 1863 struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev); 1864 1865 sport->port.line = pdev->id; 1866 sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data; 1867 1868 if (!pdata) 1869 return; 1870 1871 if (pdata->flags & IMXUART_HAVE_RTSCTS) 1872 sport->have_rtscts = 1; 1873 
1874 if (pdata->flags & IMXUART_IRDA) 1875 sport->use_irda = 1; 1876 } 1877 1878 static int serial_imx_probe(struct platform_device *pdev) 1879 { 1880 struct imx_port *sport; 1881 void __iomem *base; 1882 int ret = 0; 1883 struct resource *res; 1884 1885 sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); 1886 if (!sport) 1887 return -ENOMEM; 1888 1889 ret = serial_imx_probe_dt(sport, pdev); 1890 if (ret > 0) 1891 serial_imx_probe_pdata(sport, pdev); 1892 else if (ret < 0) 1893 return ret; 1894 1895 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1896 base = devm_ioremap_resource(&pdev->dev, res); 1897 if (IS_ERR(base)) 1898 return PTR_ERR(base); 1899 1900 sport->port.dev = &pdev->dev; 1901 sport->port.mapbase = res->start; 1902 sport->port.membase = base; 1903 sport->port.type = PORT_IMX, 1904 sport->port.iotype = UPIO_MEM; 1905 sport->port.irq = platform_get_irq(pdev, 0); 1906 sport->rxirq = platform_get_irq(pdev, 0); 1907 sport->txirq = platform_get_irq(pdev, 1); 1908 sport->rtsirq = platform_get_irq(pdev, 2); 1909 sport->port.fifosize = 32; 1910 sport->port.ops = &imx_pops; 1911 sport->port.flags = UPF_BOOT_AUTOCONF; 1912 init_timer(&sport->timer); 1913 sport->timer.function = imx_timeout; 1914 sport->timer.data = (unsigned long)sport; 1915 1916 sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 1917 if (IS_ERR(sport->clk_ipg)) { 1918 ret = PTR_ERR(sport->clk_ipg); 1919 dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); 1920 return ret; 1921 } 1922 1923 sport->clk_per = devm_clk_get(&pdev->dev, "per"); 1924 if (IS_ERR(sport->clk_per)) { 1925 ret = PTR_ERR(sport->clk_per); 1926 dev_err(&pdev->dev, "failed to get per clk: %d\n", ret); 1927 return ret; 1928 } 1929 1930 sport->port.uartclk = clk_get_rate(sport->clk_per); 1931 1932 imx_ports[sport->port.line] = sport; 1933 1934 platform_set_drvdata(pdev, sport); 1935 1936 return uart_add_one_port(&imx_reg, &sport->port); 1937 } 1938 1939 static int serial_imx_remove(struct platform_device *pdev) 
1940 { 1941 struct imx_port *sport = platform_get_drvdata(pdev); 1942 1943 return uart_remove_one_port(&imx_reg, &sport->port); 1944 } 1945 1946 static struct platform_driver serial_imx_driver = { 1947 .probe = serial_imx_probe, 1948 .remove = serial_imx_remove, 1949 1950 .suspend = serial_imx_suspend, 1951 .resume = serial_imx_resume, 1952 .id_table = imx_uart_devtype, 1953 .driver = { 1954 .name = "imx-uart", 1955 .owner = THIS_MODULE, 1956 .of_match_table = imx_uart_dt_ids, 1957 }, 1958 }; 1959 1960 static int __init imx_serial_init(void) 1961 { 1962 int ret; 1963 1964 pr_info("Serial: IMX driver\n"); 1965 1966 ret = uart_register_driver(&imx_reg); 1967 if (ret) 1968 return ret; 1969 1970 ret = platform_driver_register(&serial_imx_driver); 1971 if (ret != 0) 1972 uart_unregister_driver(&imx_reg); 1973 1974 return ret; 1975 } 1976 1977 static void __exit imx_serial_exit(void) 1978 { 1979 platform_driver_unregister(&serial_imx_driver); 1980 uart_unregister_driver(&imx_reg); 1981 } 1982 1983 module_init(imx_serial_init); 1984 module_exit(imx_serial_exit); 1985 1986 MODULE_AUTHOR("Sascha Hauer"); 1987 MODULE_DESCRIPTION("IMX generic serial port driver"); 1988 MODULE_LICENSE("GPL"); 1989 MODULE_ALIAS("platform:imx-uart"); 1990