// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Motorola/Freescale IMX serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Author: Sascha Hauer <sascha@saschahauer.de>
 * Copyright (C) 2004 Pengutronix
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/rational.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "serial_mctrl_gpio.h"

/* Register definitions */
#define URXD0	0x0	/* Receiver Register */
#define URTX0	0x40	/* Transmitter Register */
#define UCR1	0x80	/* Control Register 1 */
#define UCR2	0x84	/* Control Register 2 */
#define UCR3	0x88	/* Control Register 3 */
#define UCR4	0x8c	/* Control Register 4 */
#define UFCR	0x90	/* FIFO Control Register */
#define USR1	0x94	/* Status Register 1 */
#define USR2	0x98	/* Status Register 2 */
#define UESC	0x9c	/* Escape Character Register */
#define UTIM	0xa0	/* Escape Timer Register */
#define UBIR	0xa4	/* BRM Incremental Register */
#define UBMR	0xa8	/* BRM Modulator Register */
#define UBRC	0xac	/* Baud Rate Count Register */
#define IMX21_ONEMS	0xb0	/* One Millisecond register */
#define IMX1_UTS	0xd0	/* UART Test Register on i.mx1 */
#define IMX21_UTS	0xb4	/* UART Test Register on all other i.mx */

/* UART Control Register Bit Fields. */
#define URXD_DUMMY_READ (1<<16)
#define URXD_CHARRDY	(1<<15)
#define URXD_ERR	(1<<14)
#define URXD_OVRRUN	(1<<13)
#define URXD_FRMERR	(1<<12)
#define URXD_BRK	(1<<11)
#define URXD_PRERR	(1<<10)
#define URXD_RX_DATA	(0xFF<<0)
#define UCR1_ADEN	(1<<15) /* Auto detect interrupt */
#define UCR1_ADBR	(1<<14) /* Auto detect baud rate */
#define UCR1_TRDYEN	(1<<13) /* Transmitter ready interrupt enable */
#define UCR1_IDEN	(1<<12) /* Idle condition interrupt */
#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
#define UCR1_RRDYEN	(1<<9)	/* Recv ready interrupt enable */
#define UCR1_RXDMAEN	(1<<8)	/* Recv ready DMA enable */
#define UCR1_IREN	(1<<7)	/* Infrared interface enable */
#define UCR1_TXMPTYEN	(1<<6)	/* Transmitter empty interrupt enable */
#define UCR1_RTSDEN	(1<<5)	/* RTS delta interrupt enable */
#define UCR1_SNDBRK	(1<<4)	/* Send break */
#define UCR1_TXDMAEN	(1<<3)	/* Transmitter ready DMA enable */
#define IMX1_UCR1_UARTCLKEN	(1<<2)	/* UART clock enabled, i.mx1 only */
#define UCR1_ATDMAEN	(1<<2)	/* Aging DMA Timer Enable */
#define UCR1_DOZE	(1<<1)	/* Doze */
#define UCR1_UARTEN	(1<<0)	/* UART enabled */
#define UCR2_ESCI	(1<<15) /* Escape seq interrupt enable */
#define UCR2_IRTS	(1<<14) /* Ignore RTS pin */
#define UCR2_CTSC	(1<<13) /* CTS pin control */
#define UCR2_CTS	(1<<12) /* Clear to send */
#define UCR2_ESCEN	(1<<11) /* Escape enable */
#define UCR2_PREN	(1<<8)	/* Parity enable */
#define UCR2_PROE	(1<<7)	/* Parity odd/even */
#define UCR2_STPB	(1<<6)	/* Stop */
#define UCR2_WS		(1<<5)	/* Word size */
#define UCR2_RTSEN	(1<<4)	/* Request to send interrupt enable */
#define UCR2_ATEN	(1<<3)	/* Aging Timer Enable */
#define UCR2_TXEN	(1<<2)	/* Transmitter enabled */
#define UCR2_RXEN	(1<<1)	/* Receiver enabled */
#define UCR2_SRST	(1<<0)	/* SW reset */
#define UCR3_DTREN	(1<<13) /* DTR interrupt enable */
#define UCR3_PARERREN	(1<<12) /* Parity error interrupt enable */
#define UCR3_FRAERREN	(1<<11) /* Frame error interrupt enable */
#define UCR3_DSR	(1<<10) /* Data set ready */
#define UCR3_DCD	(1<<9)	/* Data carrier detect */
#define UCR3_RI		(1<<8)	/* Ring indicator */
#define UCR3_ADNIMP	(1<<7)	/* Autobaud Detection Not Improved */
#define UCR3_RXDSEN	(1<<6)	/* Receive status interrupt enable */
#define UCR3_AIRINTEN	(1<<5)	/* Async IR wake interrupt enable */
#define UCR3_AWAKEN	(1<<4)	/* Async wake interrupt enable */
#define UCR3_DTRDEN	(1<<3)	/* Data Terminal Ready Delta Enable. */
#define IMX21_UCR3_RXDMUXSEL	(1<<2)	/* RXD Muxed Input Select */
#define UCR3_INVT	(1<<1)	/* Inverted Infrared transmission */
#define UCR3_BPEN	(1<<0)	/* Preset registers enable */
#define UCR4_CTSTL_SHF	10	/* CTS trigger level shift */
#define UCR4_CTSTL_MASK 0x3F	/* CTS trigger is 6 bits wide */
#define UCR4_INVR	(1<<9)	/* Inverted infrared reception */
#define UCR4_ENIRI	(1<<8)	/* Serial infrared interrupt enable */
#define UCR4_WKEN	(1<<7)	/* Wake interrupt enable */
#define UCR4_REF16	(1<<6)	/* Ref freq 16 MHz */
#define UCR4_IDDMAEN	(1<<6)	/* DMA IDLE Condition Detected */
#define UCR4_IRSC	(1<<5)	/* IR special case */
#define UCR4_TCEN	(1<<3)	/* Transmit complete interrupt enable */
#define UCR4_BKEN	(1<<2)	/* Break condition interrupt enable */
#define UCR4_OREN	(1<<1)	/* Receiver overrun interrupt enable */
#define UCR4_DREN	(1<<0)	/* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF	0	/* Receiver trigger level shift */
#define UFCR_DCEDTE	(1<<6)	/* DCE/DTE mode select */
#define UFCR_RFDIV	(7<<7)	/* Reference freq divider mask */
#define UFCR_RFDIV_REG(x)	(((x) < 7 ? 6 - (x) : 6) << 7)
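/*
 * The 3-bit RFDIV field encodes "divide by n" as (6 - n) for n = 1..6 and
 * as 6 for n = 7; imx_uart_console_get_options() below applies the inverse
 * mapping when it decodes UFCR.
 */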
#define UFCR_TXTL_SHF	10	/* Transmitter trigger level shift */
#define USR1_PARITYERR	(1<<15) /* Parity error interrupt flag */
#define USR1_RTSS	(1<<14) /* RTS pin status */
#define USR1_TRDY	(1<<13) /* Transmitter ready interrupt/dma flag */
#define USR1_RTSD	(1<<12) /* RTS delta */
#define USR1_ESCF	(1<<11) /* Escape seq interrupt flag */
#define USR1_FRAMERR	(1<<10) /* Frame error interrupt flag */
#define USR1_RRDY	(1<<9)	/* Receiver ready interrupt/dma flag */
#define USR1_AGTIM	(1<<8)	/* Ageing timer interrupt flag */
#define USR1_DTRD	(1<<7)	/* DTR Delta */
#define USR1_RXDS	(1<<6)	/* Receiver idle interrupt flag */
#define USR1_AIRINT	(1<<5)	/* Async IR wake interrupt flag */
#define USR1_AWAKE	(1<<4)	/* Async wake interrupt flag */
#define USR2_ADET	(1<<15) /* Auto baud rate detect complete */
#define USR2_TXFE	(1<<14) /* Transmit buffer FIFO empty */
#define USR2_DTRF	(1<<13) /* DTR edge interrupt flag */
#define USR2_IDLE	(1<<12) /* Idle condition */
#define USR2_RIDELT	(1<<10) /* Ring Interrupt Delta */
#define USR2_RIIN	(1<<9)	/* Ring Indicator Input */
#define USR2_IRINT	(1<<8)	/* Serial infrared interrupt flag */
#define USR2_WAKE	(1<<7)	/* Wake */
#define USR2_DCDIN	(1<<5)	/* Data Carrier Detect Input */
#define USR2_RTSF	(1<<4)	/* RTS edge interrupt flag */
#define USR2_TXDC	(1<<3)	/* Transmitter complete */
#define USR2_BRCD	(1<<2)	/* Break condition */
#define USR2_ORE	(1<<1)	/* Overrun error */
#define USR2_RDR	(1<<0)	/* Recv data ready */
#define UTS_FRCPERR	(1<<13) /* Force parity error */
#define UTS_LOOP	(1<<12) /* Loop tx and rx */
#define UTS_TXEMPTY	(1<<6)	/* TxFIFO empty */
#define UTS_RXEMPTY	(1<<5)	/* RxFIFO empty */
#define UTS_TXFULL	(1<<4)	/* TxFIFO full */
#define UTS_RXFULL	(1<<3)	/* RxFIFO full */
#define UTS_SOFTRST	(1<<0)	/* Software reset */

/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_IMX_MAJOR	207
#define MINOR_START		16
#define DEV_NAME		"ttymxc"

/*
 * This determines how often we check the modem status signals
 * for any change. They generally aren't connected to an IRQ
 * so we have to poll them. We also check immediately before
 * filling the TX fifo in case CTS has been dropped.
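 * The timer is armed in imx_uart_enable_ms() and rearms itself every
 * MCTRL_TIMEOUT jiffies (250 ms) from imx_uart_timeout().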
 */
#define MCTRL_TIMEOUT	(250*HZ/1000)

#define DRIVER_NAME "IMX-uart"

#define UART_NR 8

/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
	IMX1_UART,
	IMX21_UART,
	IMX53_UART,
	IMX6Q_UART,
};

/* device type dependent stuff */
struct imx_uart_data {
	unsigned uts_reg;
	enum imx_uart_type devtype;
};

enum imx_tx_state {
	OFF,
	WAIT_AFTER_RTS,
	SEND,
	WAIT_AFTER_SEND,
};

struct imx_port {
	struct uart_port	port;
	struct timer_list	timer;
	unsigned int		old_status;
	unsigned int		have_rtscts:1;
	unsigned int		have_rtsgpio:1;
	unsigned int		dte_mode:1;
	unsigned int		inverted_tx:1;
	unsigned int		inverted_rx:1;
	struct clk		*clk_ipg;
	struct clk		*clk_per;
	const struct imx_uart_data *devdata;

	struct mctrl_gpios *gpios;

	/* shadow registers */
	unsigned int ucr1;
	unsigned int ucr2;
	unsigned int ucr3;
	unsigned int ucr4;
	unsigned int ufcr;

	/* DMA fields */
	unsigned int		dma_is_enabled:1;
	unsigned int		dma_is_rxing:1;
	unsigned int		dma_is_txing:1;
	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
	struct scatterlist	rx_sgl, tx_sgl[2];
	void			*rx_buf;
	struct circ_buf		rx_ring;
	unsigned int		rx_buf_size;
	unsigned int		rx_period_length;
	unsigned int		rx_periods;
	dma_cookie_t		rx_cookie;
	unsigned int		tx_bytes;
	unsigned int		dma_tx_nents;
	unsigned int		saved_reg[10];
	bool			context_saved;

	enum imx_tx_state	tx_state;
	struct hrtimer		trigger_start_tx;
	struct hrtimer		trigger_stop_tx;
};

struct imx_port_ucrs {
	unsigned int ucr1;
	unsigned int ucr2;
	unsigned int ucr3;
};

static struct imx_uart_data imx_uart_devdata[] = {
	[IMX1_UART] = {
		.uts_reg = IMX1_UTS,
		.devtype = IMX1_UART,
	},
	[IMX21_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX21_UART,
	},
	[IMX53_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX53_UART,
	},
	[IMX6Q_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX6Q_UART,
	},
};

static const struct of_device_id imx_uart_dt_ids[] = {
	{ .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
	{ .compatible = "fsl,imx53-uart", .data = &imx_uart_devdata[IMX53_UART], },
	{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
	{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);

static void imx_uart_writel(struct imx_port *sport, u32 val, u32 offset)
{
	switch (offset) {
	case UCR1:
		sport->ucr1 = val;
		break;
	case UCR2:
		sport->ucr2 = val;
		break;
	case UCR3:
		sport->ucr3 = val;
		break;
	case UCR4:
		sport->ucr4 = val;
		break;
	case UFCR:
		sport->ufcr = val;
		break;
	default:
		break;
	}
	writel(val, sport->port.membase + offset);
}

static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
{
	switch (offset) {
	case UCR1:
		return sport->ucr1;
	case UCR2:
		/*
		 * UCR2_SRST is the only bit in the cached registers that might
		 * differ from the value that was last written. As it only
		 * automatically becomes one after being cleared, reread
		 * conditionally.
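		 * (SRST is cleared to trigger a software reset in
		 * imx_uart_startup() and imx_uart_flush_buffer(), which then
		 * poll until the bit reads back as one again.)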
		 */
		if (!(sport->ucr2 & UCR2_SRST))
			sport->ucr2 = readl(sport->port.membase + offset);
		return sport->ucr2;
	case UCR3:
		return sport->ucr3;
	case UCR4:
		return sport->ucr4;
	case UFCR:
		return sport->ufcr;
	default:
		return readl(sport->port.membase + offset);
	}
}

static inline unsigned imx_uart_uts_reg(struct imx_port *sport)
{
	return sport->devdata->uts_reg;
}

static inline int imx_uart_is_imx1(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX1_UART;
}

static inline int imx_uart_is_imx21(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX21_UART;
}

static inline int imx_uart_is_imx53(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX53_UART;
}

static inline int imx_uart_is_imx6q(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX6Q_UART;
}

/*
 * Save and restore functions for UCR1, UCR2 and UCR3 registers
 */
#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
static void imx_uart_ucrs_save(struct imx_port *sport,
			       struct imx_port_ucrs *ucr)
{
	/* save control registers */
	ucr->ucr1 = imx_uart_readl(sport, UCR1);
	ucr->ucr2 = imx_uart_readl(sport, UCR2);
	ucr->ucr3 = imx_uart_readl(sport, UCR3);
}

static void imx_uart_ucrs_restore(struct imx_port *sport,
				  struct imx_port_ucrs *ucr)
{
	/* restore control registers */
	imx_uart_writel(sport, ucr->ucr1, UCR1);
	imx_uart_writel(sport, ucr->ucr2, UCR2);
	imx_uart_writel(sport, ucr->ucr3, UCR3);
}
#endif

/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
{
	*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);

	sport->port.mctrl |= TIOCM_RTS;
	mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}

/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
{
	*ucr2 &= ~UCR2_CTSC;
	*ucr2 |= UCR2_CTS;

	sport->port.mctrl &= ~TIOCM_RTS;
	mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}

static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
{
	hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
}

/* called with port.lock taken and irqs off */
static void imx_uart_start_rx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned int ucr1, ucr2;

	ucr1 = imx_uart_readl(sport, UCR1);
	ucr2 = imx_uart_readl(sport, UCR2);

	ucr2 |= UCR2_RXEN;

	if (sport->dma_is_enabled) {
		ucr1 |= UCR1_RXDMAEN | UCR1_ATDMAEN;
	} else {
		ucr1 |= UCR1_RRDYEN;
		ucr2 |= UCR2_ATEN;
	}

	/* Write UCR2 first as it includes RXEN */
	imx_uart_writel(sport, ucr2, UCR2);
	imx_uart_writel(sport, ucr1, UCR1);
}

/* called with port.lock taken and irqs off */
static void imx_uart_stop_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr1, ucr4, usr2;

	if (sport->tx_state == OFF)
		return;

	/*
	 * We may be running in SMP context, so if the DMA TX thread is
	 * running on another CPU, we have to wait for it to finish.
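	 * (imx_uart_dma_tx_callback() takes port.lock itself and, in RS485
	 * mode, arms UCR4_TCEN, so a pending stop is effectively retried
	 * from the TXDC interrupt path.)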
	 */
	if (sport->dma_is_txing)
		return;

	ucr1 = imx_uart_readl(sport, UCR1);
	imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);

	usr2 = imx_uart_readl(sport, USR2);
	if (!(usr2 & USR2_TXDC)) {
		/* The shifter is still busy, so retry once TC triggers */
		return;
	}

	ucr4 = imx_uart_readl(sport, UCR4);
	ucr4 &= ~UCR4_TCEN;
	imx_uart_writel(sport, ucr4, UCR4);

	/* in rs485 mode disable transmitter */
	if (port->rs485.flags & SER_RS485_ENABLED) {
		if (sport->tx_state == SEND) {
			sport->tx_state = WAIT_AFTER_SEND;
			start_hrtimer_ms(&sport->trigger_stop_tx,
					 port->rs485.delay_rts_after_send);
			return;
		}

		if (sport->tx_state == WAIT_AFTER_RTS ||
		    sport->tx_state == WAIT_AFTER_SEND) {
			u32 ucr2;

			hrtimer_try_to_cancel(&sport->trigger_start_tx);

			ucr2 = imx_uart_readl(sport, UCR2);
			if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
				imx_uart_rts_active(sport, &ucr2);
			else
				imx_uart_rts_inactive(sport, &ucr2);
			imx_uart_writel(sport, ucr2, UCR2);

			imx_uart_start_rx(port);

			sport->tx_state = OFF;
		}
	} else {
		sport->tx_state = OFF;
	}
}

/* called with port.lock taken and irqs off */
static void imx_uart_stop_rx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr1, ucr2, ucr4;

	ucr1 = imx_uart_readl(sport, UCR1);
	ucr2 = imx_uart_readl(sport, UCR2);
	ucr4 = imx_uart_readl(sport, UCR4);

	if (sport->dma_is_enabled) {
		ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
	} else {
		ucr1 &= ~UCR1_RRDYEN;
		ucr2 &= ~UCR2_ATEN;
		ucr4 &= ~UCR4_OREN;
	}
	imx_uart_writel(sport, ucr1, UCR1);
	imx_uart_writel(sport, ucr4, UCR4);

	ucr2 &= ~UCR2_RXEN;
	imx_uart_writel(sport, ucr2, UCR2);
}

/* called with port.lock taken and irqs off */
static void imx_uart_enable_ms(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	mod_timer(&sport->timer, jiffies);

	mctrl_gpio_enable_ms(sport->gpios);
}

static void imx_uart_dma_tx(struct imx_port *sport);

/* called with port.lock taken and irqs off */
static inline void imx_uart_transmit_buffer(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	if (sport->port.x_char) {
		/* Send next char */
		imx_uart_writel(sport, sport->port.x_char, URTX0);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		imx_uart_stop_tx(&sport->port);
		return;
	}

	if (sport->dma_is_enabled) {
		u32 ucr1;
		/*
		 * We've just sent an X-char. Ensure the TX DMA is enabled
		 * and the TX IRQ is disabled.
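		 * (If a DMA transfer is already in flight, only the UCR1 bits
		 * are updated; otherwise a new transfer is kicked off below.)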
		 */
		ucr1 = imx_uart_readl(sport, UCR1);
		ucr1 &= ~UCR1_TRDYEN;
		if (sport->dma_is_txing) {
			ucr1 |= UCR1_TXDMAEN;
			imx_uart_writel(sport, ucr1, UCR1);
		} else {
			imx_uart_writel(sport, ucr1, UCR1);
			imx_uart_dma_tx(sport);
		}

		return;
	}

	while (!uart_circ_empty(xmit) &&
	       !(imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)) {
		/* send xmit->buf[xmit->tail]
		 * out the port here */
		imx_uart_writel(sport, xmit->buf[xmit->tail], URTX0);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		imx_uart_stop_tx(&sport->port);
}

static void imx_uart_dma_tx_callback(void *data)
{
	struct imx_port *sport = data;
	struct scatterlist *sgl = &sport->tx_sgl[0];
	struct circ_buf *xmit = &sport->port.state->xmit;
	unsigned long flags;
	u32 ucr1;

	spin_lock_irqsave(&sport->port.lock, flags);

	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 &= ~UCR1_TXDMAEN;
	imx_uart_writel(sport, ucr1, UCR1);

	/* update the stat */
	xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
	sport->port.icount.tx += sport->tx_bytes;

	dev_dbg(sport->port.dev, "we finish the TX DMA.\n");

	sport->dma_is_txing = 0;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
		imx_uart_dma_tx(sport);
	else if (sport->port.rs485.flags & SER_RS485_ENABLED) {
		u32 ucr4 = imx_uart_readl(sport, UCR4);
		ucr4 |= UCR4_TCEN;
		imx_uart_writel(sport, ucr4, UCR4);
	}

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

/* called with port.lock taken and irqs off */
static void imx_uart_dma_tx(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct scatterlist *sgl = sport->tx_sgl;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = sport->dma_chan_tx;
	struct device *dev = sport->port.dev;
	u32 ucr1, ucr4;
	int ret;

	if (sport->dma_is_txing)
		return;

	ucr4 = imx_uart_readl(sport, UCR4);
	ucr4 &= ~UCR4_TCEN;
	imx_uart_writel(sport, ucr4, UCR4);

	sport->tx_bytes = uart_circ_chars_pending(xmit);

	if (xmit->tail < xmit->head || xmit->head == 0) {
		sport->dma_tx_nents = 1;
		sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
	} else {
		sport->dma_tx_nents = 2;
		sg_init_table(sgl, 2);
		sg_set_buf(sgl, xmit->buf + xmit->tail,
			   UART_XMIT_SIZE - xmit->tail);
		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
	}

	ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
	if (ret == 0) {
		dev_err(dev, "DMA mapping error for TX.\n");
		return;
	}
	desc = dmaengine_prep_slave_sg(chan, sgl, ret,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
			     DMA_TO_DEVICE);
		dev_err(dev, "We cannot prepare for the TX slave dma!\n");
		return;
	}
	desc->callback = imx_uart_dma_tx_callback;
	desc->callback_param = sport;

	dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
		uart_circ_chars_pending(xmit));

	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 |= UCR1_TXDMAEN;
	imx_uart_writel(sport, ucr1, UCR1);

	/* fire it */
	sport->dma_is_txing = 1;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}

/* called with port.lock taken and irqs off */
static void imx_uart_start_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr1;

	if (!sport->port.x_char && uart_circ_empty(&port->state->xmit))
		return;

	/*
	 * We cannot simply do nothing here if sport->tx_state == SEND already
	 * because UCR1_TXMPTYEN might already have been cleared in
	 * imx_uart_stop_tx(), but tx_state is still SEND.
	 */

	if (port->rs485.flags & SER_RS485_ENABLED) {
		if (sport->tx_state == OFF) {
			u32 ucr2 = imx_uart_readl(sport, UCR2);
			if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
				imx_uart_rts_active(sport, &ucr2);
			else
				imx_uart_rts_inactive(sport, &ucr2);
			imx_uart_writel(sport, ucr2, UCR2);

			if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
				imx_uart_stop_rx(port);

			sport->tx_state = WAIT_AFTER_RTS;
			start_hrtimer_ms(&sport->trigger_start_tx,
					 port->rs485.delay_rts_before_send);
			return;
		}

		if (sport->tx_state == WAIT_AFTER_SEND
		    || sport->tx_state == WAIT_AFTER_RTS) {

			hrtimer_try_to_cancel(&sport->trigger_stop_tx);

			/*
			 * Enable transmitter and shifter empty irq only if DMA
			 * is off. In the DMA case this is done in the
			 * tx-callback.
			 */
			if (!sport->dma_is_enabled) {
				u32 ucr4 = imx_uart_readl(sport, UCR4);
				ucr4 |= UCR4_TCEN;
				imx_uart_writel(sport, ucr4, UCR4);
			}

			sport->tx_state = SEND;
		}
	} else {
		sport->tx_state = SEND;
	}

	if (!sport->dma_is_enabled) {
		ucr1 = imx_uart_readl(sport, UCR1);
		imx_uart_writel(sport, ucr1 | UCR1_TRDYEN, UCR1);
	}

	if (sport->dma_is_enabled) {
		if (sport->port.x_char) {
			/* We have an X-char to send, so enable the TX IRQ and
			 * disable the TX DMA to let the TX interrupt send the
			 * X-char */
			ucr1 = imx_uart_readl(sport, UCR1);
			ucr1 &= ~UCR1_TXDMAEN;
			ucr1 |= UCR1_TRDYEN;
			imx_uart_writel(sport, ucr1, UCR1);
			return;
		}

		if (!uart_circ_empty(&port->state->xmit) &&
		    !uart_tx_stopped(port))
			imx_uart_dma_tx(sport);
		return;
	}
}

static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	u32 usr1;

	imx_uart_writel(sport, USR1_RTSD, USR1);
	usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
	uart_handle_cts_change(&sport->port, !!usr1);
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);

	return IRQ_HANDLED;
}

static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	irqreturn_t ret;

	spin_lock(&sport->port.lock);

	ret = __imx_uart_rtsint(irq, dev_id);

	spin_unlock(&sport->port.lock);

	return ret;
}

static irqreturn_t imx_uart_txint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;

	spin_lock(&sport->port.lock);
	imx_uart_transmit_buffer(sport);
	spin_unlock(&sport->port.lock);
	return IRQ_HANDLED;
}

static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int rx, flg, ignored = 0;
	struct tty_port *port = &sport->port.state->port;

	while (imx_uart_readl(sport, USR2) & USR2_RDR) {
		u32 usr2;

		flg = TTY_NORMAL;
		sport->port.icount.rx++;
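		/*
		 * Bits 7:0 of URXD0 hold the character; the per-character
		 * status flags (URXD_BRK, URXD_PRERR, URXD_FRMERR,
		 * URXD_OVRRUN) arrive in the upper bits, see the URXD_*
		 * definitions above.
		 */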

		rx = imx_uart_readl(sport, URXD0);

		usr2 = imx_uart_readl(sport, USR2);
		if (usr2 & USR2_BRCD) {
			imx_uart_writel(sport, USR2_BRCD, USR2);
			if (uart_handle_break(&sport->port))
				continue;
		}

		if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
			continue;

		if (unlikely(rx & URXD_ERR)) {
			if (rx & URXD_BRK)
				sport->port.icount.brk++;
			else if (rx & URXD_PRERR)
				sport->port.icount.parity++;
			else if (rx & URXD_FRMERR)
				sport->port.icount.frame++;
			if (rx & URXD_OVRRUN)
				sport->port.icount.overrun++;

			if (rx & sport->port.ignore_status_mask) {
				if (++ignored > 100)
					goto out;
				continue;
			}

			rx &= (sport->port.read_status_mask | 0xFF);

			if (rx & URXD_BRK)
				flg = TTY_BREAK;
			else if (rx & URXD_PRERR)
				flg = TTY_PARITY;
			else if (rx & URXD_FRMERR)
				flg = TTY_FRAME;
			if (rx & URXD_OVRRUN)
				flg = TTY_OVERRUN;

			sport->port.sysrq = 0;
		}

		if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
			goto out;

		if (tty_insert_flip_char(port, rx, flg) == 0)
			sport->port.icount.buf_overrun++;
	}

out:
	tty_flip_buffer_push(port);

	return IRQ_HANDLED;
}

static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	irqreturn_t ret;

	spin_lock(&sport->port.lock);

	ret = __imx_uart_rxint(irq, dev_id);

	spin_unlock(&sport->port.lock);

	return ret;
}

static void imx_uart_clear_rx_errors(struct imx_port *sport);

/*
 * We have a modem side uart, so the meanings of RTS and CTS are inverted.
 */
static unsigned int imx_uart_get_hwmctrl(struct imx_port *sport)
{
	unsigned int tmp = TIOCM_DSR;
	unsigned usr1 = imx_uart_readl(sport, USR1);
	unsigned usr2 = imx_uart_readl(sport, USR2);

	if (usr1 & USR1_RTSS)
		tmp |= TIOCM_CTS;

	/* in DCE mode DCDIN is always 0 */
	if (!(usr2 & USR2_DCDIN))
		tmp |= TIOCM_CAR;

	if (sport->dte_mode)
		if (!(imx_uart_readl(sport, USR2) & USR2_RIIN))
			tmp |= TIOCM_RI;

	return tmp;
}

/*
 * Handle any change of modem status signal since we were last called.
 */
static void imx_uart_mctrl_check(struct imx_port *sport)
{
	unsigned int status, changed;

	status = imx_uart_get_hwmctrl(sport);
	changed = status ^ sport->old_status;

	if (changed == 0)
		return;

	sport->old_status = status;

	if (changed & TIOCM_RI && status & TIOCM_RI)
		sport->port.icount.rng++;
	if (changed & TIOCM_DSR)
		sport->port.icount.dsr++;
	if (changed & TIOCM_CAR)
		uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
	if (changed & TIOCM_CTS)
		uart_handle_cts_change(&sport->port, status & TIOCM_CTS);

	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}

static irqreturn_t imx_uart_int(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&sport->port.lock);

	usr1 = imx_uart_readl(sport, USR1);
	usr2 = imx_uart_readl(sport, USR2);
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr2 = imx_uart_readl(sport, UCR2);
	ucr3 = imx_uart_readl(sport, UCR3);
	ucr4 = imx_uart_readl(sport, UCR4);

	/*
	 * Even if a condition that can trigger an irq is true, only handle it
	 * if the respective irq source is enabled. This prevents some undesired
	 * actions. For example, a character sitting in the RX FIFO that
	 * should be fetched via DMA might otherwise be fetched using PIO, or
	 * the receiver might currently be off so that reading from URXD0
	 * results in an exception. So just mask the (raw) status bits for
	 * disabled irqs.
	 */
	if ((ucr1 & UCR1_RRDYEN) == 0)
		usr1 &= ~USR1_RRDY;
	if ((ucr2 & UCR2_ATEN) == 0)
		usr1 &= ~USR1_AGTIM;
	if ((ucr1 & UCR1_TRDYEN) == 0)
		usr1 &= ~USR1_TRDY;
	if ((ucr4 & UCR4_TCEN) == 0)
		usr2 &= ~USR2_TXDC;
	if ((ucr3 & UCR3_DTRDEN) == 0)
		usr1 &= ~USR1_DTRD;
	if ((ucr1 & UCR1_RTSDEN) == 0)
		usr1 &= ~USR1_RTSD;
	if ((ucr3 & UCR3_AWAKEN) == 0)
		usr1 &= ~USR1_AWAKE;
	if ((ucr4 & UCR4_OREN) == 0)
		usr2 &= ~USR2_ORE;

	if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
		imx_uart_writel(sport, USR1_AGTIM, USR1);

		__imx_uart_rxint(irq, dev_id);
		ret = IRQ_HANDLED;
	}

	if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
		imx_uart_transmit_buffer(sport);
		ret = IRQ_HANDLED;
	}

	if (usr1 & USR1_DTRD) {
		imx_uart_writel(sport, USR1_DTRD, USR1);

		imx_uart_mctrl_check(sport);

		ret = IRQ_HANDLED;
	}

	if (usr1 & USR1_RTSD) {
		__imx_uart_rtsint(irq, dev_id);
		ret = IRQ_HANDLED;
	}

	if (usr1 & USR1_AWAKE) {
		imx_uart_writel(sport, USR1_AWAKE, USR1);
		ret = IRQ_HANDLED;
	}

	if (usr2 & USR2_ORE) {
		sport->port.icount.overrun++;
		imx_uart_writel(sport, USR2_ORE, USR2);
		ret = IRQ_HANDLED;
	}

	spin_unlock(&sport->port.lock);

	return ret;
}

/*
 * Return TIOCSER_TEMT when transmitter is not busy.
 */
static unsigned int imx_uart_tx_empty(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned int ret;

	ret = (imx_uart_readl(sport, USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;

	/* If the TX DMA is working, return 0. */
	if (sport->dma_is_txing)
		ret = 0;

	return ret;
}

/* called with port.lock taken and irqs off */
static unsigned int imx_uart_get_mctrl(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned int ret = imx_uart_get_hwmctrl(sport);

	mctrl_gpio_get(sport->gpios, &ret);

	return ret;
}

/* called with port.lock taken and irqs off */
static void imx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr3, uts;

	if (!(port->rs485.flags & SER_RS485_ENABLED)) {
		u32 ucr2;

		/*
		 * Turn off autoRTS if RTS is lowered and restore autoRTS
		 * setting if RTS is raised.
		 */
		ucr2 = imx_uart_readl(sport, UCR2);
		ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
		if (mctrl & TIOCM_RTS) {
			ucr2 |= UCR2_CTS;
			/*
			 * UCR2_IRTS is unset if and only if the port is
			 * configured for CRTSCTS, so we use inverted UCR2_IRTS
			 * to get the state to restore to.
			 */
			if (!(ucr2 & UCR2_IRTS))
				ucr2 |= UCR2_CTSC;
		}
		imx_uart_writel(sport, ucr2, UCR2);
	}

	ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_DSR;
	if (!(mctrl & TIOCM_DTR))
		ucr3 |= UCR3_DSR;
	imx_uart_writel(sport, ucr3, UCR3);

	uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)) & ~UTS_LOOP;
	if (mctrl & TIOCM_LOOP)
		uts |= UTS_LOOP;
	imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));

	mctrl_gpio_set(sport->gpios, mctrl);
}

/*
 * Interrupts always disabled.
 */
static void imx_uart_break_ctl(struct uart_port *port, int break_state)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	u32 ucr1;

	spin_lock_irqsave(&sport->port.lock, flags);

	ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;

	if (break_state != 0)
		ucr1 |= UCR1_SNDBRK;

	imx_uart_writel(sport, ucr1, UCR1);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

/*
 * This is our per-port timeout handler, for checking the
 * modem status signals.
 */
static void imx_uart_timeout(struct timer_list *t)
{
	struct imx_port *sport = from_timer(sport, t, timer);
	unsigned long flags;

	if (sport->port.state) {
		spin_lock_irqsave(&sport->port.lock, flags);
		imx_uart_mctrl_check(sport);
		spin_unlock_irqrestore(&sport->port.lock, flags);

		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
	}
}

/*
 * There are two kinds of RX DMA interrupts (such as in the MX6Q):
 * [1] the RX DMA buffer is full.
 * [2] the aging timer expires
 *
 * Condition [2] is triggered when a character has been sitting in the FIFO
 * for at least 8 byte durations.
 */
static void imx_uart_dma_rx_callback(void *data)
{
	struct imx_port *sport = data;
	struct dma_chan *chan = sport->dma_chan_rx;
	struct scatterlist *sgl = &sport->rx_sgl;
	struct tty_port *port = &sport->port.state->port;
	struct dma_tx_state state;
	struct circ_buf *rx_ring = &sport->rx_ring;
	enum dma_status status;
	unsigned int w_bytes = 0;
	unsigned int r_bytes;
	unsigned int bd_size;

	status = dmaengine_tx_status(chan, sport->rx_cookie, &state);

	if (status == DMA_ERROR) {
		imx_uart_clear_rx_errors(sport);
		return;
	}

	if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {

		/*
		 * The state-residue variable represents the empty space
		 * relative to the entire buffer. Taking this into
		 * consideration, the head is always calculated based on the
		 * buffer's total length minus the DMA transaction residue.
		 * The UART script from the SDMA firmware will jump to the
		 * next buffer descriptor once a DMA transaction is finalized
		 * (IMX53 RM - A.4.1.2.4). Taking this into consideration, the
		 * tail is always at the beginning of the buffer descriptor
		 * that contains the head.
		 */

		/* Calculate the head */
		rx_ring->head = sg_dma_len(sgl) - state.residue;

		/* Calculate the tail. */
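		/*
		 * (head - 1) is rounded down to a multiple of bd_size, the
		 * length of one of the rx_periods equally sized buffer
		 * descriptors, i.e. the start of the descriptor that holds
		 * the head.
		 */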
		bd_size = sg_dma_len(sgl) / sport->rx_periods;
		rx_ring->tail = ((rx_ring->head - 1) / bd_size) * bd_size;

		if (rx_ring->head <= sg_dma_len(sgl) &&
		    rx_ring->head > rx_ring->tail) {

			/* Move data from tail to head */
			r_bytes = rx_ring->head - rx_ring->tail;

			/* CPU claims ownership of RX DMA buffer */
			dma_sync_sg_for_cpu(sport->port.dev, sgl, 1,
					    DMA_FROM_DEVICE);

			w_bytes = tty_insert_flip_string(port,
							 sport->rx_buf + rx_ring->tail, r_bytes);

			/* UART retrieves ownership of RX DMA buffer */
			dma_sync_sg_for_device(sport->port.dev, sgl, 1,
					       DMA_FROM_DEVICE);

			if (w_bytes != r_bytes)
				sport->port.icount.buf_overrun++;

			sport->port.icount.rx += w_bytes;
		} else {
			WARN_ON(rx_ring->head > sg_dma_len(sgl));
			WARN_ON(rx_ring->head <= rx_ring->tail);
		}
	}

	if (w_bytes) {
		tty_flip_buffer_push(port);
		dev_dbg(sport->port.dev, "We get %d bytes.\n", w_bytes);
	}
}

static int imx_uart_start_rx_dma(struct imx_port *sport)
{
	struct scatterlist *sgl = &sport->rx_sgl;
	struct dma_chan *chan = sport->dma_chan_rx;
	struct device *dev = sport->port.dev;
	struct dma_async_tx_descriptor *desc;
	int ret;

	sport->rx_ring.head = 0;
	sport->rx_ring.tail = 0;

	sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
	ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
	if (ret == 0) {
		dev_err(dev, "DMA mapping error for RX.\n");
		return -EINVAL;
	}

	desc = dmaengine_prep_dma_cyclic(chan, sg_dma_address(sgl),
					 sg_dma_len(sgl), sg_dma_len(sgl) / sport->rx_periods,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);

	if (!desc) {
		dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
		dev_err(dev, "We cannot prepare for the RX slave dma!\n");
		return -EINVAL;
	}
	desc->callback = imx_uart_dma_rx_callback;
	desc->callback_param = sport;

	dev_dbg(dev, "RX: prepare for the DMA.\n");
	sport->dma_is_rxing = 1;
	sport->rx_cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

static void imx_uart_clear_rx_errors(struct imx_port *sport)
{
	struct tty_port *port = &sport->port.state->port;
	u32 usr1, usr2;

	usr1 = imx_uart_readl(sport, USR1);
	usr2 = imx_uart_readl(sport, USR2);

	if (usr2 & USR2_BRCD) {
		sport->port.icount.brk++;
		imx_uart_writel(sport, USR2_BRCD, USR2);
		uart_handle_break(&sport->port);
		if (tty_insert_flip_char(port, 0, TTY_BREAK) == 0)
			sport->port.icount.buf_overrun++;
		tty_flip_buffer_push(port);
	} else {
		if (usr1 & USR1_FRAMERR) {
			sport->port.icount.frame++;
			imx_uart_writel(sport, USR1_FRAMERR, USR1);
		} else if (usr1 & USR1_PARITYERR) {
			sport->port.icount.parity++;
			imx_uart_writel(sport, USR1_PARITYERR, USR1);
		}
	}

	if (usr2 & USR2_ORE) {
		sport->port.icount.overrun++;
		imx_uart_writel(sport, USR2_ORE, USR2);
	}
}

#define TXTL_DEFAULT	2 /* reset default */
#define RXTL_DEFAULT	1 /* reset default */
#define TXTL_DMA	8 /* DMA burst setting */
#define RXTL_DMA	9 /* DMA burst setting */

static void imx_uart_setup_ufcr(struct imx_port *sport,
				unsigned char txwl, unsigned char rxwl)
{
	unsigned int val;

	/* set receiver / transmitter trigger level */
	val = imx_uart_readl(sport, UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
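	/*
	 * TXTL occupies bits 15:10 and RXTL bits 5:0 of UFCR (see
	 * UFCR_TXTL_SHF and UFCR_RXTL_SHF above); the divider and DCE/DTE
	 * bits preserved above stay untouched.
	 */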
	val |= txwl << UFCR_TXTL_SHF | rxwl;
	imx_uart_writel(sport, val, UFCR);
}

static void imx_uart_dma_exit(struct imx_port *sport)
{
	if (sport->dma_chan_rx) {
		dmaengine_terminate_sync(sport->dma_chan_rx);
		dma_release_channel(sport->dma_chan_rx);
		sport->dma_chan_rx = NULL;
		sport->rx_cookie = -EINVAL;
		kfree(sport->rx_buf);
		sport->rx_buf = NULL;
	}

	if (sport->dma_chan_tx) {
		dmaengine_terminate_sync(sport->dma_chan_tx);
		dma_release_channel(sport->dma_chan_tx);
		sport->dma_chan_tx = NULL;
	}
}

static int imx_uart_dma_init(struct imx_port *sport)
{
	struct dma_slave_config slave_config = {};
	struct device *dev = sport->port.dev;
	int ret;

	/* Prepare for RX : */
	sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
	if (!sport->dma_chan_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = sport->port.mapbase + URXD0;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	/* one byte less than the watermark level to enable the aging timer */
	slave_config.src_maxburst = RXTL_DMA - 1;
	ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	sport->rx_buf_size = sport->rx_period_length * sport->rx_periods;
	sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
	if (!sport->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}
	sport->rx_ring.buf = sport->rx_buf;

	/* Prepare for TX : */
	sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
	if (!sport->dma_chan_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = sport->port.mapbase + URTX0;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = TXTL_DMA;
	ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.\n");
		goto err;
	}

	return 0;
err:
	imx_uart_dma_exit(sport);
	return ret;
}

static void imx_uart_enable_dma(struct imx_port *sport)
{
	u32 ucr1;

	imx_uart_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);

	/* set UCR1 */
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 |= UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN;
	imx_uart_writel(sport, ucr1, UCR1);

	sport->dma_is_enabled = 1;
}

static void imx_uart_disable_dma(struct imx_port *sport)
{
	u32 ucr1;

	/* clear UCR1 */
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN);
	imx_uart_writel(sport, ucr1, UCR1);

	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

	sport->dma_is_enabled = 0;
}

/* half the RX buffer size */
#define CTSTL 16

static int imx_uart_startup(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	int retval, i;
	unsigned long flags;
	int dma_is_inited = 0;
	u32 ucr1, ucr2, ucr3, ucr4;

	retval = clk_prepare_enable(sport->clk_per);
	if (retval)
		return retval;
	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval) {
		clk_disable_unprepare(sport->clk_per);
		return retval;
	}

	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

	/* disable the DREN bit (Data Ready interrupt enable) before
	 * requesting IRQs
	 */
	ucr4 = imx_uart_readl(sport, UCR4);

	/* set the trigger level for CTS */
	ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
	ucr4 |= CTSTL << UCR4_CTSTL_SHF;

	imx_uart_writel(sport, ucr4 & ~UCR4_DREN, UCR4);

	/* Can we enable the DMA support? */
	if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
		dma_is_inited = 1;

	spin_lock_irqsave(&sport->port.lock, flags);
	/* Reset FIFOs and state machines */
	i = 100;

	ucr2 = imx_uart_readl(sport, UCR2);
	ucr2 &= ~UCR2_SRST;
	imx_uart_writel(sport, ucr2, UCR2);

	while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
		udelay(1);

	/*
	 * Finally, clear and enable interrupts
	 */
	imx_uart_writel(sport, USR1_RTSD | USR1_DTRD, USR1);
	imx_uart_writel(sport, USR2_ORE, USR2);

	ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_RRDYEN;
	ucr1 |= UCR1_UARTEN;
	if (sport->have_rtscts)
		ucr1 |= UCR1_RTSDEN;

	imx_uart_writel(sport, ucr1, UCR1);

	ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
	if (!sport->dma_is_enabled)
		ucr4 |= UCR4_OREN;
	if (sport->inverted_rx)
		ucr4 |= UCR4_INVR;
	imx_uart_writel(sport, ucr4, UCR4);

	ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_INVT;
	/*
	 * configure tx polarity before enabling tx
	 */
	if (sport->inverted_tx)
		ucr3 |= UCR3_INVT;

	if (!imx_uart_is_imx1(sport)) {
		ucr3 |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;

		if (sport->dte_mode)
			/* disable broken interrupts */
			ucr3 &= ~(UCR3_RI | UCR3_DCD);
	}
	imx_uart_writel(sport, ucr3, UCR3);

	ucr2 = imx_uart_readl(sport, UCR2) & ~UCR2_ATEN;
	ucr2 |= (UCR2_RXEN | UCR2_TXEN);
	if (!sport->have_rtscts)
		ucr2 |= UCR2_IRTS;
	/*
	 * Make sure the edge-sensitive RTS irq is disabled;
	 * we use RTSD instead.
	 */
	if (!imx_uart_is_imx1(sport))
		ucr2 &= ~UCR2_RTSEN;
	imx_uart_writel(sport, ucr2, UCR2);

	/*
	 * Enable modem status interrupts
	 */
	imx_uart_enable_ms(&sport->port);

	if (dma_is_inited) {
		imx_uart_enable_dma(sport);
		imx_uart_start_rx_dma(sport);
	} else {
		ucr1 = imx_uart_readl(sport, UCR1);
		ucr1 |= UCR1_RRDYEN;
		imx_uart_writel(sport, ucr1, UCR1);

		ucr2 = imx_uart_readl(sport, UCR2);
		ucr2 |= UCR2_ATEN;
		imx_uart_writel(sport, ucr2, UCR2);
	}

	spin_unlock_irqrestore(&sport->port.lock, flags);

	return 0;
}

static void imx_uart_shutdown(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	u32 ucr1, ucr2, ucr4;

	if (sport->dma_is_enabled) {
		dmaengine_terminate_sync(sport->dma_chan_tx);
		if (sport->dma_is_txing) {
			dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
				     sport->dma_tx_nents, DMA_TO_DEVICE);
			sport->dma_is_txing = 0;
		}
		dmaengine_terminate_sync(sport->dma_chan_rx);
		if (sport->dma_is_rxing) {
			dma_unmap_sg(sport->port.dev, &sport->rx_sgl,
				     1, DMA_FROM_DEVICE);
			sport->dma_is_rxing = 0;
		}

		spin_lock_irqsave(&sport->port.lock, flags);
		imx_uart_stop_tx(port);
		imx_uart_stop_rx(port);
		imx_uart_disable_dma(sport);
		spin_unlock_irqrestore(&sport->port.lock, flags);
		imx_uart_dma_exit(sport);
	}

	mctrl_gpio_disable_ms(sport->gpios);

	spin_lock_irqsave(&sport->port.lock, flags);
	ucr2 = imx_uart_readl(sport, UCR2);
	ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
	imx_uart_writel(sport, ucr2, UCR2);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);

	/*
	 * Disable all interrupts, port and break condition.
	 */

	spin_lock_irqsave(&sport->port.lock, flags);

	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN |
		  UCR1_RXDMAEN | UCR1_ATDMAEN);
	imx_uart_writel(sport, ucr1, UCR1);

	ucr4 = imx_uart_readl(sport, UCR4);
	ucr4 &= ~UCR4_TCEN;
	imx_uart_writel(sport, ucr4, UCR4);

	spin_unlock_irqrestore(&sport->port.lock, flags);

	clk_disable_unprepare(sport->clk_per);
	clk_disable_unprepare(sport->clk_ipg);
}

/* called with port.lock taken and irqs off */
static void imx_uart_flush_buffer(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	struct scatterlist *sgl = &sport->tx_sgl[0];
	u32 ucr2;
	int i = 100, ubir, ubmr, uts;

	if (!sport->dma_chan_tx)
		return;

	sport->tx_bytes = 0;
	dmaengine_terminate_all(sport->dma_chan_tx);
	if (sport->dma_is_txing) {
		u32 ucr1;

		dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
			     DMA_TO_DEVICE);
		ucr1 = imx_uart_readl(sport, UCR1);
		ucr1 &= ~UCR1_TXDMAEN;
		imx_uart_writel(sport, ucr1, UCR1);
		sport->dma_is_txing = 0;
	}

	/*
	 * According to the Reference Manual description of the UART SRST bit:
	 *
	 * "Reset the transmit and receive state machines,
	 * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
	 * and UTS[6-3]".
	 *
	 * We don't need to restore the old values from USR1, USR2, URXD and
	 * UTXD. UBRC is read only, so only save/restore the other three
	 * registers.
	 */
	ubir = imx_uart_readl(sport, UBIR);
	ubmr = imx_uart_readl(sport, UBMR);
	uts = imx_uart_readl(sport, IMX21_UTS);

	ucr2 = imx_uart_readl(sport, UCR2);
	ucr2 &= ~UCR2_SRST;
	imx_uart_writel(sport, ucr2, UCR2);

	while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
		udelay(1);

	/* Restore the registers */
	imx_uart_writel(sport, ubir, UBIR);
	imx_uart_writel(sport, ubmr, UBMR);
	imx_uart_writel(sport, uts, IMX21_UTS);
}

static void
imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		     struct ktermios *old)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	u32 ucr2, old_ucr2, ufcr;
	unsigned int baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
	unsigned long div;
	unsigned long num, denom, old_ubir, old_ubmr;
	uint64_t tdiv64;

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	del_timer_sync(&sport->timer);

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	/*
	 * Read current UCR2 and save it for future use, then clear all the
	 * bits except those we will or may need to preserve.
	 */
	old_ucr2 = imx_uart_readl(sport, UCR2);
	ucr2 = old_ucr2 & (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN | UCR2_CTS);

	ucr2 |= UCR2_SRST | UCR2_IRTS;
	if ((termios->c_cflag & CSIZE) == CS8)
		ucr2 |= UCR2_WS;

	if (!sport->have_rtscts)
		termios->c_cflag &= ~CRTSCTS;

	if (port->rs485.flags & SER_RS485_ENABLED) {
		/*
		 * RTS is mandatory for rs485 operation, so keep
		 * it under manual control and keep transmitter
		 * disabled.
		 */
		if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
			imx_uart_rts_active(sport, &ucr2);
		else
			imx_uart_rts_inactive(sport, &ucr2);

	} else if (termios->c_cflag & CRTSCTS) {
		/*
		 * Only let receiver control RTS output if we were not requested
		 * to have RTS inactive (which then should take precedence).
		 */
		if (ucr2 & UCR2_CTS)
			ucr2 |= UCR2_CTSC;
	}

	if (termios->c_cflag & CRTSCTS)
		ucr2 &= ~UCR2_IRTS;
	if (termios->c_cflag & CSTOPB)
		ucr2 |= UCR2_STPB;
	if (termios->c_cflag & PARENB) {
		ucr2 |= UCR2_PREN;
		if (termios->c_cflag & PARODD)
			ucr2 |= UCR2_PROE;
	}

	sport->port.read_status_mask = 0;
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |= URXD_BRK;

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |= URXD_BRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |= URXD_OVRRUN;
	}

	if ((termios->c_cflag & CREAD) == 0)
		sport->port.ignore_status_mask |= URXD_DUMMY_READ;

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/* custom-baudrate handling */
	div = sport->port.uartclk / (baud * 16);
	if (baud == 38400 && quot != div)
		baud = sport->port.uartclk / (quot * 16);

	div = sport->port.uartclk / (baud * 16);
	if (div > 7)
		div = 7;
	if (!div)
		div = 1;

	rational_best_approximation(16 * div * baud, sport->port.uartclk,
				    1 << 16, 1 << 16, &num, &denom);

	tdiv64 = sport->port.uartclk;
	tdiv64 *= num;
	do_div(tdiv64, denom * 16 * div);
	tty_termios_encode_baud_rate(termios,
				     (speed_t)tdiv64, (speed_t)tdiv64);

	num -= 1;
	denom -= 1;

	ufcr = imx_uart_readl(sport, UFCR);
	ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
	imx_uart_writel(sport, ufcr, UFCR);

	/*
	 * The two registers below should always be written together, and in
	 * this particular order. One consequence is that we need to check
	 * whether either of them changes and then update both. We do need the
	 * check for change, as even writing the same values seems to
	 * "restart" the transmission/receiving logic in the hardware, which
	 * leads to data breakage even when the rate doesn't in fact change.
	 * E.g., the user switches RTS/CTS handshake and suddenly gets broken
	 * bytes.
	 */
	old_ubir = imx_uart_readl(sport, UBIR);
	old_ubmr = imx_uart_readl(sport, UBMR);
	if (old_ubir != num || old_ubmr != denom) {
		imx_uart_writel(sport, num, UBIR);
		imx_uart_writel(sport, denom, UBMR);
	}

	if (!imx_uart_is_imx1(sport))
		imx_uart_writel(sport, sport->port.uartclk / div / 1000,
				IMX21_ONEMS);

	imx_uart_writel(sport, ucr2, UCR2);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		imx_uart_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

static const char *imx_uart_type(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	return sport->port.type == PORT_IMX ? "IMX" : NULL;
}

/*
 * Configure/autoconfigure the port.
 */
static void imx_uart_config_port(struct uart_port *port, int flags)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (flags & UART_CONFIG_TYPE)
		sport->port.type = PORT_IMX;
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 * The only changes we allow are to the flags and type, and
 * even then only between PORT_IMX and PORT_UNKNOWN.
 */
static int
imx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct imx_port *sport = (struct imx_port *)port;
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
		ret = -EINVAL;
	if (sport->port.irq != ser->irq)
		ret = -EINVAL;
	if (ser->io_type != UPIO_MEM)
		ret = -EINVAL;
	if (sport->port.uartclk / 16 != ser->baud_base)
		ret = -EINVAL;
	if (sport->port.mapbase != (unsigned long)ser->iomem_base)
		ret = -EINVAL;
	if (sport->port.iobase != ser->port)
		ret = -EINVAL;
	if (ser->hub6 != 0)
		ret = -EINVAL;
	return ret;
}

#if defined(CONFIG_CONSOLE_POLL)

static int imx_uart_poll_init(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	u32 ucr1, ucr2;
	int retval;

	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval)
		return retval;
	retval = clk_prepare_enable(sport->clk_per);
	if (retval)
		clk_disable_unprepare(sport->clk_ipg);

	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

	spin_lock_irqsave(&sport->port.lock, flags);

	/*
	 * Be careful about the order of enabling bits here. First enable the
	 * receiver (UARTEN + RXEN) and only then the corresponding irqs.
	 * This prevents the case where a character already sitting in the RX
	 * FIFO triggers an irq but the attempt to fetch it from there results
	 * in an exception because UARTEN or RXEN is still off.
	 */
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr2 = imx_uart_readl(sport, UCR2);

	if (imx_uart_is_imx1(sport))
		ucr1 |= IMX1_UCR1_UARTCLKEN;

	ucr1 |= UCR1_UARTEN;
	ucr1 &= ~(UCR1_TRDYEN | UCR1_RTSDEN | UCR1_RRDYEN);

	ucr2 |= UCR2_RXEN | UCR2_TXEN;
	ucr2 &= ~UCR2_ATEN;

	imx_uart_writel(sport, ucr1, UCR1);
	imx_uart_writel(sport, ucr2, UCR2);

	/* now enable irqs */
	imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
	imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);

	spin_unlock_irqrestore(&sport->port.lock, flags);

	return 0;
}

static int imx_uart_poll_get_char(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (!(imx_uart_readl(sport, USR2) & USR2_RDR))
		return NO_POLL_CHAR;

	return imx_uart_readl(sport, URXD0) & URXD_RX_DATA;
}

static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned int status;

	/* drain */
	do {
		status = imx_uart_readl(sport, USR1);
	} while (~status & USR1_TRDY);

	/* write */
	imx_uart_writel(sport, c, URTX0);

	/* flush */
	do {
		status = imx_uart_readl(sport, USR2);
	} while (~status & USR2_TXDC);
}
#endif

/* called with port.lock taken and irqs off or from .probe without locking */
static int imx_uart_rs485_config(struct uart_port *port,
				 struct serial_rs485 *rs485conf)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr2;

	/* RTS is required to control the transmitter */
	if (!sport->have_rtscts && !sport->have_rtsgpio)
		rs485conf->flags &= ~SER_RS485_ENABLED;

	if (rs485conf->flags & SER_RS485_ENABLED) {

/* called with port.lock taken and irqs off or from .probe without locking */
static int imx_uart_rs485_config(struct uart_port *port,
				 struct serial_rs485 *rs485conf)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr2;

	/* RTS is required to control the transmitter */
	if (!sport->have_rtscts && !sport->have_rtsgpio)
		rs485conf->flags &= ~SER_RS485_ENABLED;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		/* Enable receiver if low-active RTS signal is requested */
		if (sport->have_rtscts && !sport->have_rtsgpio &&
		    !(rs485conf->flags & SER_RS485_RTS_ON_SEND))
			rs485conf->flags |= SER_RS485_RX_DURING_TX;

		/* disable transmitter */
		ucr2 = imx_uart_readl(sport, UCR2);
		if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
			imx_uart_rts_active(sport, &ucr2);
		else
			imx_uart_rts_inactive(sport, &ucr2);
		imx_uart_writel(sport, ucr2, UCR2);
	}

	/* Make sure Rx is enabled in case Tx is active with Rx disabled */
	if (!(rs485conf->flags & SER_RS485_ENABLED) ||
	    rs485conf->flags & SER_RS485_RX_DURING_TX)
		imx_uart_start_rx(port);

	port->rs485 = *rs485conf;

	return 0;
}
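
#if 0
/*
 * Illustrative only, not part of the driver: a userspace sketch of how the
 * handler above is reached. Enabling half-duplex RS-485 with RTS asserted
 * while sending goes through the standard TIOCSRS485 ioctl ("fd" is an
 * already-open tty such as /dev/ttymxc0; error handling omitted).
 */
#include <sys/ioctl.h>
#include <linux/serial.h>

static void example_enable_rs485(int fd)
{
	struct serial_rs485 rs485 = {
		.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
		.delay_rts_after_send = 1,	/* hold RTS 1 ms after TX */
	};

	ioctl(fd, TIOCSRS485, &rs485);
}
#endif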

static const struct uart_ops imx_uart_pops = {
	.tx_empty = imx_uart_tx_empty,
	.set_mctrl = imx_uart_set_mctrl,
	.get_mctrl = imx_uart_get_mctrl,
	.stop_tx = imx_uart_stop_tx,
	.start_tx = imx_uart_start_tx,
	.stop_rx = imx_uart_stop_rx,
	.enable_ms = imx_uart_enable_ms,
	.break_ctl = imx_uart_break_ctl,
	.startup = imx_uart_startup,
	.shutdown = imx_uart_shutdown,
	.flush_buffer = imx_uart_flush_buffer,
	.set_termios = imx_uart_set_termios,
	.type = imx_uart_type,
	.config_port = imx_uart_config_port,
	.verify_port = imx_uart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_init = imx_uart_poll_init,
	.poll_get_char = imx_uart_poll_get_char,
	.poll_put_char = imx_uart_poll_put_char,
#endif
};

static struct imx_port *imx_uart_ports[UART_NR];

#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
static void imx_uart_console_putchar(struct uart_port *port, int ch)
{
	struct imx_port *sport = (struct imx_port *)port;

	while (imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)
		barrier();

	imx_uart_writel(sport, ch, URTX0);
}

/*
 * Interrupts are disabled on entry.
 */
static void
imx_uart_console_write(struct console *co, const char *s, unsigned int count)
{
	struct imx_port *sport = imx_uart_ports[co->index];
	struct imx_port_ucrs old_ucr;
	unsigned long flags;
	unsigned int ucr1;
	int locked = 1;

	if (sport->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock_irqsave(&sport->port.lock, flags);
	else
		spin_lock_irqsave(&sport->port.lock, flags);

	/*
	 * First, save UCR1/2/3 and then disable interrupts.
	 */
	imx_uart_ucrs_save(sport, &old_ucr);
	ucr1 = old_ucr.ucr1;

	if (imx_uart_is_imx1(sport))
		ucr1 |= IMX1_UCR1_UARTCLKEN;
	ucr1 |= UCR1_UARTEN;
	ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN);

	imx_uart_writel(sport, ucr1, UCR1);

	imx_uart_writel(sport, old_ucr.ucr2 | UCR2_TXEN, UCR2);

	uart_console_write(&sport->port, s, count, imx_uart_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty
	 * and restore UCR1/2/3.
	 */
	while (!(imx_uart_readl(sport, USR2) & USR2_TXDC))
		;

	imx_uart_ucrs_restore(sport, &old_ucr);

	if (locked)
		spin_unlock_irqrestore(&sport->port.lock, flags);
}

/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 */
static void
imx_uart_console_get_options(struct imx_port *sport, int *baud,
			     int *parity, int *bits)
{
	if (imx_uart_readl(sport, UCR1) & UCR1_UARTEN) {
		/* ok, the port was enabled */
		unsigned int ucr2, ubir, ubmr, uartclk;
		unsigned int baud_raw;
		unsigned int ufcr_rfdiv;

		ucr2 = imx_uart_readl(sport, UCR2);

		*parity = 'n';
		if (ucr2 & UCR2_PREN) {
			if (ucr2 & UCR2_PROE)
				*parity = 'o';
			else
				*parity = 'e';
		}

		if (ucr2 & UCR2_WS)
			*bits = 8;
		else
			*bits = 7;

		ubir = imx_uart_readl(sport, UBIR) & 0xffff;
		ubmr = imx_uart_readl(sport, UBMR) & 0xffff;

		ufcr_rfdiv = (imx_uart_readl(sport, UFCR) & UFCR_RFDIV) >> 7;
		if (ufcr_rfdiv == 6)
			ufcr_rfdiv = 7;
		else
			ufcr_rfdiv = 6 - ufcr_rfdiv;

		uartclk = clk_get_rate(sport->clk_per);
		uartclk /= ufcr_rfdiv;

		{	/*
			 * The block below computes
			 * baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
			 * exactly, without floating point support or a long
			 * long division, either of which would otherwise be
			 * needed to avoid a 32-bit arithmetic overflow.
			 */
			unsigned int mul = ubir + 1;
			unsigned int div = 16 * (ubmr + 1);
			unsigned int rem = uartclk % div;

			baud_raw = (uartclk / div) * mul;
			baud_raw += (rem * mul + div / 2) / div;
			*baud = (baud_raw + 50) / 100 * 100;
		}

		if (*baud != baud_raw)
			dev_info(sport->port.dev, "Console IMX rounded baud rate from %d to %d\n",
				 baud_raw, *baud);
	}
}
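
/*
 * Worked example for the recovery above (illustrative, assumed values that
 * match the set_termios example earlier): clk_per = 80 MHz with the RFDIV
 * field reading 6 means "reference clock divided by 7", so
 * uartclk = 11428571. With UBIR = 503 and UBMR = 3124, mul = 504 and
 * div = 50000, giving baud_raw = 228 * 504 + (28571 * 504 + 25000) / 50000
 * = 114912 + 288 = 115200; rounding to the nearest 100 leaves 115200.
 */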

static int
imx_uart_console_setup(struct console *co, char *options)
{
	struct imx_port *sport;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int retval;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= ARRAY_SIZE(imx_uart_ports))
		co->index = 0;
	sport = imx_uart_ports[co->index];
	if (sport == NULL)
		return -ENODEV;

	/* For setting the registers, we only need to enable the ipg clock. */
	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval)
		goto error_console;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		imx_uart_console_get_options(sport, &baud, &parity, &bits);

	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);

	if (retval) {
		clk_disable_unprepare(sport->clk_ipg);
		goto error_console;
	}

	retval = clk_prepare_enable(sport->clk_per);
	if (retval)
		clk_disable_unprepare(sport->clk_ipg);

error_console:
	return retval;
}

static int
imx_uart_console_exit(struct console *co)
{
	struct imx_port *sport = imx_uart_ports[co->index];

	clk_disable_unprepare(sport->clk_per);
	clk_disable_unprepare(sport->clk_ipg);

	return 0;
}

static struct uart_driver imx_uart_uart_driver;
static struct console imx_uart_console = {
	.name = DEV_NAME,
	.write = imx_uart_console_write,
	.device = uart_console_device,
	.setup = imx_uart_console_setup,
	.exit = imx_uart_console_exit,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &imx_uart_uart_driver,
};

#define IMX_CONSOLE	&imx_uart_console

#else
#define IMX_CONSOLE	NULL
#endif
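
/*
 * Example (illustrative; assumes DEV_NAME is "ttymxc" as defined earlier in
 * this file): booting with console=ttymxc0,115200n8 makes the console core
 * call imx_uart_console_setup() with co->index = 0 and options = "115200n8",
 * which uart_parse_options() splits into baud/parity/bits/flow before
 * uart_set_options() programs the port.
 */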

static struct uart_driver imx_uart_uart_driver = {
	.owner = THIS_MODULE,
	.driver_name = DRIVER_NAME,
	.dev_name = DEV_NAME,
	.major = SERIAL_IMX_MAJOR,
	.minor = MINOR_START,
	.nr = ARRAY_SIZE(imx_uart_ports),
	.cons = IMX_CONSOLE,
};

static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t)
{
	struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (sport->tx_state == WAIT_AFTER_RTS)
		imx_uart_start_tx(&sport->port);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return HRTIMER_NORESTART;
}

static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
{
	struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (sport->tx_state == WAIT_AFTER_SEND)
		imx_uart_stop_tx(&sport->port);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return HRTIMER_NORESTART;
}

/* Default RX DMA buffer configuration */
#define RX_DMA_PERIODS		16
#define RX_DMA_PERIOD_LEN	(PAGE_SIZE / 4)

static int imx_uart_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct imx_port *sport;
	void __iomem *base;
	u32 dma_buf_conf[2];
	int ret = 0;
	u32 ucr1;
	struct resource *res;
	int txirq, rxirq, rtsirq;

	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	sport->devdata = of_device_get_match_data(&pdev->dev);

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
		return ret;
	}
	sport->port.line = ret;

	if (of_get_property(np, "uart-has-rtscts", NULL) ||
	    of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
		sport->have_rtscts = 1;

	if (of_get_property(np, "fsl,dte-mode", NULL))
		sport->dte_mode = 1;

	if (of_get_property(np, "rts-gpios", NULL))
		sport->have_rtsgpio = 1;

	if (of_get_property(np, "fsl,inverted-tx", NULL))
		sport->inverted_tx = 1;

	if (of_get_property(np, "fsl,inverted-rx", NULL))
		sport->inverted_rx = 1;

	if (!of_property_read_u32_array(np, "fsl,dma-info", dma_buf_conf, 2)) {
		sport->rx_period_length = dma_buf_conf[0];
		sport->rx_periods = dma_buf_conf[1];
	} else {
		sport->rx_period_length = RX_DMA_PERIOD_LEN;
		sport->rx_periods = RX_DMA_PERIODS;
	}
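
	/*
	 * Illustrative device tree fragment for the properties parsed above
	 * (node label and values are assumptions, not a recommendation):
	 *
	 *	&uart2 {
	 *		uart-has-rtscts;
	 *		fsl,dma-info = <1024 4>;  // RX period length, periods
	 *	};
	 */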

	if (sport->port.line >= ARRAY_SIZE(imx_uart_ports)) {
		dev_err(&pdev->dev, "serial%d out of range\n",
			sport->port.line);
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	rxirq = platform_get_irq(pdev, 0);
	if (rxirq < 0)
		return rxirq;
	txirq = platform_get_irq_optional(pdev, 1);
	rtsirq = platform_get_irq_optional(pdev, 2);

	sport->port.dev = &pdev->dev;
	sport->port.mapbase = res->start;
	sport->port.membase = base;
	sport->port.type = PORT_IMX;
	sport->port.iotype = UPIO_MEM;
	sport->port.irq = rxirq;
	sport->port.fifosize = 32;
	sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
	sport->port.ops = &imx_uart_pops;
	sport->port.rs485_config = imx_uart_rs485_config;
	sport->port.flags = UPF_BOOT_AUTOCONF;
	timer_setup(&sport->timer, imx_uart_timeout, 0);

	sport->gpios = mctrl_gpio_init(&sport->port, 0);
	if (IS_ERR(sport->gpios))
		return PTR_ERR(sport->gpios);

	sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sport->clk_ipg)) {
		ret = PTR_ERR(sport->clk_ipg);
		dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
		return ret;
	}

	sport->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(sport->clk_per)) {
		ret = PTR_ERR(sport->clk_per);
		dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
		return ret;
	}

	sport->port.uartclk = clk_get_rate(sport->clk_per);

	/* For register access, we only need to enable the ipg clock. */
	ret = clk_prepare_enable(sport->clk_ipg);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}

	/* initialize shadow register values */
	sport->ucr1 = readl(sport->port.membase + UCR1);
	sport->ucr2 = readl(sport->port.membase + UCR2);
	sport->ucr3 = readl(sport->port.membase + UCR3);
	sport->ucr4 = readl(sport->port.membase + UCR4);
	sport->ufcr = readl(sport->port.membase + UFCR);

	ret = uart_get_rs485_mode(&sport->port);
	if (ret) {
		clk_disable_unprepare(sport->clk_ipg);
		return ret;
	}

	if (sport->port.rs485.flags & SER_RS485_ENABLED &&
	    (!sport->have_rtscts && !sport->have_rtsgpio))
		dev_err(&pdev->dev, "no RTS control, disabling rs485\n");

	/*
	 * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
	 * signal cannot be set low during transmission while the
	 * receiver is off (a limitation of the i.MX UART IP).
	 */
	if (sport->port.rs485.flags & SER_RS485_ENABLED &&
	    sport->have_rtscts && !sport->have_rtsgpio &&
	    (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
	     !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
		dev_err(&pdev->dev,
			"low-active RTS not possible when receiver is off, enabling receiver\n");

	imx_uart_rs485_config(&sport->port, &sport->port.rs485);

	/* Disable interrupts before requesting them */
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
	imx_uart_writel(sport, ucr1, UCR1);

	if (!imx_uart_is_imx1(sport) && sport->dte_mode) {
		/*
		 * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI
		 * and influences whether UCR3_RI and UCR3_DCD change the level
		 * of RI and DCD (when they are outputs) or enable the
		 * respective irqs. So set this bit early, i.e. before
		 * requesting irqs.
		 */
		u32 ufcr = imx_uart_readl(sport, UFCR);
		if (!(ufcr & UFCR_DCEDTE))
			imx_uart_writel(sport, ufcr | UFCR_DCEDTE, UFCR);

		/*
		 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
		 * enabled later because they cannot be cleared
		 * (confirmed on i.MX25), which makes them unusable.
		 */
		imx_uart_writel(sport,
				IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR,
				UCR3);

	} else {
		u32 ucr3 = UCR3_DSR;
		u32 ufcr = imx_uart_readl(sport, UFCR);
		if (ufcr & UFCR_DCEDTE)
			imx_uart_writel(sport, ufcr & ~UFCR_DCEDTE, UFCR);

		if (!imx_uart_is_imx1(sport))
			ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
		imx_uart_writel(sport, ucr3, UCR3);
	}

	clk_disable_unprepare(sport->clk_ipg);

	hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sport->trigger_start_tx.function = imx_trigger_start_tx;
	sport->trigger_stop_tx.function = imx_trigger_stop_tx;
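
	/*
	 * Illustrative example (values assumed): with an RS-485 setup of
	 * delay_rts_before_send = 1 and delay_rts_after_send = 1, asserting
	 * RTS arms trigger_start_tx so the first character goes out only
	 * after the 1 ms line turnaround, and once the transmitter is done
	 * trigger_stop_tx keeps RTS asserted for another 1 ms before the
	 * stop path deasserts it (see imx_trigger_start_tx/_stop_tx above).
	 */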

	/*
	 * Allocate the IRQ(s). i.MX1 has three interrupts, whereas later
	 * chips have only one.
	 */
	if (txirq > 0) {
		ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_rxint, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request rx irq: %d\n",
				ret);
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request tx irq: %d\n",
				ret);
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request rts irq: %d\n",
				ret);
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
			return ret;
		}
	}

	imx_uart_ports[sport->port.line] = sport;

	platform_set_drvdata(pdev, sport);

	return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
}

static int imx_uart_remove(struct platform_device *pdev)
{
	struct imx_port *sport = platform_get_drvdata(pdev);

	return uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
}

static void imx_uart_restore_context(struct imx_port *sport)
{
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (!sport->context_saved) {
		spin_unlock_irqrestore(&sport->port.lock, flags);
		return;
	}

	imx_uart_writel(sport, sport->saved_reg[4], UFCR);
	imx_uart_writel(sport, sport->saved_reg[5], UESC);
	imx_uart_writel(sport, sport->saved_reg[6], UTIM);
	imx_uart_writel(sport, sport->saved_reg[7], UBIR);
	imx_uart_writel(sport, sport->saved_reg[8], UBMR);
	imx_uart_writel(sport, sport->saved_reg[9], IMX21_UTS);
	imx_uart_writel(sport, sport->saved_reg[0], UCR1);
	imx_uart_writel(sport, sport->saved_reg[1] | UCR2_SRST, UCR2);
	imx_uart_writel(sport, sport->saved_reg[2], UCR3);
	imx_uart_writel(sport, sport->saved_reg[3], UCR4);
	sport->context_saved = false;
	spin_unlock_irqrestore(&sport->port.lock, flags);
}

static void imx_uart_save_context(struct imx_port *sport)
{
	unsigned long flags;

	/* Save necessary regs */
	spin_lock_irqsave(&sport->port.lock, flags);
	sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
	sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
	sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
	sport->saved_reg[3] = imx_uart_readl(sport, UCR4);
	sport->saved_reg[4] = imx_uart_readl(sport, UFCR);
	sport->saved_reg[5] = imx_uart_readl(sport, UESC);
	sport->saved_reg[6] = imx_uart_readl(sport, UTIM);
	sport->saved_reg[7] = imx_uart_readl(sport, UBIR);
	sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
	sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
	sport->context_saved = true;
	spin_unlock_irqrestore(&sport->port.lock, flags);
}

static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
{
	u32 ucr3;

	ucr3 = imx_uart_readl(sport, UCR3);
	if (on) {
		imx_uart_writel(sport, USR1_AWAKE, USR1);
		ucr3 |= UCR3_AWAKEN;
	} else {
		ucr3 &= ~UCR3_AWAKEN;
	}
	imx_uart_writel(sport, ucr3, UCR3);

	if (sport->have_rtscts) {
		u32 ucr1 = imx_uart_readl(sport, UCR1);

		if (on) {
			imx_uart_writel(sport, USR1_RTSD, USR1);
			ucr1 |= UCR1_RTSDEN;
		} else {
			ucr1 &= ~UCR1_RTSDEN;
		}
		imx_uart_writel(sport, ucr1, UCR1);
	}
}

static int imx_uart_suspend_noirq(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	imx_uart_save_context(sport);

	clk_disable(sport->clk_ipg);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int imx_uart_resume_noirq(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_enable(sport->clk_ipg);
	if (ret)
		return ret;

	imx_uart_restore_context(sport);

	return 0;
}

static int imx_uart_suspend(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&imx_uart_uart_driver, &sport->port);
	disable_irq(sport->port.irq);

	ret = clk_prepare_enable(sport->clk_ipg);
	if (ret)
		return ret;

	/* enable wakeup from i.MX UART */
	imx_uart_enable_wakeup(sport, true);

	return 0;
}

static int imx_uart_resume(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	/* disable wakeup from i.MX UART */
	imx_uart_enable_wakeup(sport, false);

	uart_resume_port(&imx_uart_uart_driver, &sport->port);
	enable_irq(sport->port.irq);

	clk_disable_unprepare(sport->clk_ipg);

	return 0;
}

static int imx_uart_freeze(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	uart_suspend_port(&imx_uart_uart_driver, &sport->port);

	return clk_prepare_enable(sport->clk_ipg);
}

static int imx_uart_thaw(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	uart_resume_port(&imx_uart_uart_driver, &sport->port);

	clk_disable_unprepare(sport->clk_ipg);

	return 0;
}
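
/*
 * Call-order example for the dev_pm_ops below (illustrative): on a
 * suspend-to-RAM cycle the PM core runs imx_uart_suspend() first (suspend
 * the port, arm the AWAKE/RTSD wakeup sources, keep the ipg clock
 * prepared), then imx_uart_suspend_noirq() (save the register context,
 * gate the clock, switch pinctrl to the sleep state). Resume mirrors this:
 * imx_uart_resume_noirq() restores the context before imx_uart_resume()
 * disarms the wakeup sources and rebalances the clock.
 */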

static const struct dev_pm_ops imx_uart_pm_ops = {
	.suspend_noirq = imx_uart_suspend_noirq,
	.resume_noirq = imx_uart_resume_noirq,
	.freeze_noirq = imx_uart_suspend_noirq,
	.restore_noirq = imx_uart_resume_noirq,
	.suspend = imx_uart_suspend,
	.resume = imx_uart_resume,
	.freeze = imx_uart_freeze,
	.thaw = imx_uart_thaw,
	.restore = imx_uart_thaw,
};

static struct platform_driver imx_uart_platform_driver = {
	.probe = imx_uart_probe,
	.remove = imx_uart_remove,

	.driver = {
		.name = "imx-uart",
		.of_match_table = imx_uart_dt_ids,
		.pm = &imx_uart_pm_ops,
	},
};

static int __init imx_uart_init(void)
{
	int ret = uart_register_driver(&imx_uart_uart_driver);

	if (ret)
		return ret;

	ret = platform_driver_register(&imx_uart_platform_driver);
	if (ret != 0)
		uart_unregister_driver(&imx_uart_uart_driver);

	return ret;
}

static void __exit imx_uart_exit(void)
{
	platform_driver_unregister(&imx_uart_platform_driver);
	uart_unregister_driver(&imx_uart_uart_driver);
}

module_init(imx_uart_init);
module_exit(imx_uart_exit);

MODULE_AUTHOR("Sascha Hauer");
MODULE_DESCRIPTION("IMX generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-uart");