// SPDX-License-Identifier: GPL-2.0
/*
 * serial_tegra.c
 *
 * High-speed serial driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Laxman Dewangan <ldewangan@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#define TEGRA_UART_TYPE				"TEGRA_UART"
#define TX_EMPTY_STATUS				(UART_LSR_TEMT | UART_LSR_THRE)
#define BYTES_TO_ALIGN(x)			((unsigned long)(x) & 0x3)

#define TEGRA_UART_RX_DMA_BUFFER_SIZE		4096
#define TEGRA_UART_LSR_TXFIFO_FULL		0x100
#define TEGRA_UART_IER_EORD			0x20
#define TEGRA_UART_MCR_RTS_EN			0x40
#define TEGRA_UART_MCR_CTS_EN			0x20
#define TEGRA_UART_LSR_ANY			(UART_LSR_OE | UART_LSR_BI | \
						UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR			0x08
#define TEGRA_UART_SIR_ENABLED			0x80

#define TEGRA_UART_TX_PIO			1
#define TEGRA_UART_TX_DMA			2
#define TEGRA_UART_MIN_DMA			16
#define TEGRA_UART_FIFO_SIZE			32

/*
 * The Tx FIFO trigger level setting in the Tegra UART is the reverse of
 * a conventional UART.
 */
#define TEGRA_UART_TX_TRIG_16B			0x00
#define TEGRA_UART_TX_TRIG_8B			0x10
#define TEGRA_UART_TX_TRIG_4B			0x20
#define TEGRA_UART_TX_TRIG_1B			0x30

#define TEGRA_UART_MAXIMUM			8

/* Default UART settings when started: 115200, no parity, 1 stop bit, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD			115200
#define TEGRA_UART_DEFAULT_LSR			UART_LCR_WLEN8

/* Tx transfer mode */
#define TEGRA_TX_PIO				1
#define TEGRA_TX_DMA				2

#define TEGRA_UART_FCR_IIR_FIFO_EN		0x40

/**
 * struct tegra_uart_chip_data: SoC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow Tx FIFO reset with FIFO mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source supports the clock divider.
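 * @fifo_mode_enable_status: FIFO mode enable status can be polled from the
 *			IIR register after a FIFO reset.
 * @uart_max_port: Maximum number of UART ports on the SoC.
 * @max_dma_burst_bytes: Maximum DMA burst size in bytes.
 * @error_tolerance_low_range: Lower bound of the allowed baud rate error,
 *			in percent.
 * @error_tolerance_high_range: Upper bound of the allowed baud rate error,
 *			in percent.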
 */
struct tegra_uart_chip_data {
	bool tx_fifo_full_status;
	bool allow_txfifo_reset_fifo_mode;
	bool support_clk_src_div;
	bool fifo_mode_enable_status;
	int uart_max_port;
	int max_dma_burst_bytes;
	int error_tolerance_low_range;
	int error_tolerance_high_range;
};

struct tegra_baud_tolerance {
	u32 lower_range_baud;
	u32 upper_range_baud;
	s32 tolerance;
};

struct tegra_uart_port {
	struct uart_port uport;
	const struct tegra_uart_chip_data *cdata;

	struct clk *uart_clk;
	struct reset_control *rst;
	unsigned int current_baud;

	/* Register shadow */
	unsigned long fcr_shadow;
	unsigned long mcr_shadow;
	unsigned long lcr_shadow;
	unsigned long ier_shadow;
	bool rts_active;

	int tx_in_progress;
	unsigned int tx_bytes;

	bool enable_modem_interrupt;

	bool rx_timeout;
	int rx_in_progress;
	int symb_bit;

	struct dma_chan *rx_dma_chan;
	struct dma_chan *tx_dma_chan;
	dma_addr_t rx_dma_buf_phys;
	dma_addr_t tx_dma_buf_phys;
	unsigned char *rx_dma_buf_virt;
	unsigned char *tx_dma_buf_virt;
	struct dma_async_tx_descriptor *tx_dma_desc;
	struct dma_async_tx_descriptor *rx_dma_desc;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	unsigned int tx_bytes_requested;
	unsigned int rx_bytes_requested;
	struct tegra_baud_tolerance *baud_tolerance;
	int n_adjustable_baud_rates;
	int required_rate;
	int configured_rate;
	bool use_rx_pio;
	bool use_tx_pio;
	bool rx_dma_active;
};

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
					bool dma_to_memory);

static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
		unsigned long reg)
{
	return readl(tup->uport.membase + (reg << tup->uport.regshift));
}

static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
		unsigned long reg)
{
	writel(val, tup->uport.membase + (reg << tup->uport.regshift));
}

static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}

static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	/*
	 * RI - Ring detector is active.
	 * CD/DCD/CAR - Carrier detect is always active; Linux uses several
	 * names for carrier detect.
	 * DSR - Data Set Ready is reported as always active since the
	 * hardware does not support it.
	 * CTS - Clear To Send is reported as always active, as the hardware
	 * handles CTS automatically.
	 */
	if (tup->enable_modem_interrupt)
		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
	return TIOCM_CTS;
}

static void set_rts(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= TEGRA_UART_MCR_RTS_EN;
	else
		mcr &= ~TEGRA_UART_MCR_RTS_EN;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void set_dtr(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= UART_MCR_DTR;
	else
		mcr &= ~UART_MCR_DTR;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void set_loopbk(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr = tup->mcr_shadow;

	if (active)
		mcr |= UART_MCR_LOOP;
	else
		mcr &= ~UART_MCR_LOOP;

	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int enable;

	tup->rts_active = !!(mctrl & TIOCM_RTS);
	set_rts(tup, tup->rts_active);

	enable = !!(mctrl & TIOCM_DTR);
	set_dtr(tup, enable);

	enable = !!(mctrl & TIOCM_LOOP);
	set_loopbk(tup, enable);
}

static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long lcr;

	lcr = tup->lcr_shadow;
	if (break_ctl)
		lcr |= UART_LCR_SBC;
	else
		lcr &= ~UART_LCR_SBC;
	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
}

/**
 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
 *
 * @tup: Tegra serial port data structure.
 * @cycles: Number of clock periods to wait.
 *
 * Tegra UARTs are clocked at 16X the baud/bit rate, so one UART clock
 * period is 1/16th of a bit time at the current baud rate.
 */
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
				       unsigned int cycles)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
}

/*
 * Wait for a symbol-time.
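 * One symbol covers the full character frame (start bit, data bits, optional
 * parity bit and stop bits); symb_bit is computed in tegra_uart_set_termios.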
 */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
				     unsigned int syms)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
			tup->current_baud));
}

static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
{
	unsigned long iir;
	unsigned int tmout = 100;

	do {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
			return 0;
		udelay(1);
	} while (--tmout);

	return -ETIMEDOUT;
}

static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;
	unsigned int lsr, tmout = 10000;

	if (tup->rts_active)
		set_rts(tup, false);

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		if (tup->cdata->fifo_mode_enable_status)
			tegra_uart_wait_fifo_mode_enabled(tup);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);

	do {
		lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
			break;
		udelay(1);
	} while (--tmout);

	if (tup->rts_active)
		set_rts(tup, true);
}

static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
				     unsigned int baud, long rate)
{
	int i;

	for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
		if (baud >= tup->baud_tolerance[i].lower_range_baud &&
		    baud <= tup->baud_tolerance[i].upper_range_baud)
			return (rate + (rate *
				tup->baud_tolerance[i].tolerance) / 10000);
	}

	return rate;
}

static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
{
	long diff;

	diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
		/ tup->required_rate;
	if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
	    diff > (tup->cdata->error_tolerance_high_range * 100)) {
		dev_err(tup->uport.dev,
			"configured baud rate is out of range by %ld", diff);
		return -EIO;
	}

	return 0;
}

static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	unsigned long flags;
	int ret;

	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		tup->required_rate = rate;

		if (tup->n_adjustable_baud_rates)
			rate = tegra_get_tolerance_rate(tup, baud, rate);

		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		tup->configured_rate = clk_get_rate(tup->uart_clk);
		divisor = 1;
		ret = tegra_check_rate_in_range(tup);
		if (ret < 0)
			return ret;
	} else {
		rate = clk_get_rate(tup->uart_clk);
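		/* Fixed clock source: derive the standard 16x-oversampling divisor. */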
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}

static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
			unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrun error */
			flag = TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_err(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag = TTY_PARITY;
			tup->uport.icount.parity++;
			dev_err(tup->uport.dev, "Got parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			flag = TTY_FRAME;
			tup->uport.icount.frame++;
			dev_err(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			/*
			 * Break error
			 * If FIFO read error without any data, reset Rx FIFO
			 */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
			if (tup->uport.ignore_status_mask & UART_LSR_BI)
				return TTY_BREAK;
			flag = TTY_BREAK;
			tup->uport.icount.brk++;
			dev_dbg(tup->uport.dev, "Got Break\n");
		}
		uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
	}

	return flag;
}

static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}

static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}

static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		tup->uport.icount.tx++;
	}
}

static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
		unsigned int bytes)
{
	if (bytes > TEGRA_UART_MIN_DMA)
		bytes = TEGRA_UART_MIN_DMA;

	tup->tx_in_progress = TEGRA_UART_TX_PIO;
	tup->tx_bytes = bytes;
	tup->ier_shadow |= UART_IER_THRI;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}

static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}

static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;

	dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
				   tup->tx_bytes, DMA_TO_DEVICE);

	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	if (!tup->current_baud)
		return;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}

/* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &u->state->xmit;

	if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
		tegra_uart_start_next_tx(tup);
}

static unsigned int tegra_uart_tx_empty(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	if (!tup->tx_in_progress) {
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
			ret = TIOCSER_TEMT;
	}
	spin_unlock_irqrestore(&u->lock, flags);
	return ret;
}

static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned int count;

	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
		return;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}

static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
}

static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
		struct tty_port *tty)
{
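	/* Drain the RX FIFO one character at a time until the Data Ready bit clears. */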
	do {
		char flag = TTY_NORMAL;
		unsigned long lsr = 0;
		unsigned char ch;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		if (flag != TTY_NORMAL)
			continue;

		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		if (uart_handle_sysrq_char(&tup->uport, ch))
			continue;

		/* Honour CREAD: drop the character if DR is being ignored */
		if (tup->uport.ignore_status_mask & UART_LSR_DR)
			continue;

		if (tty)
			tty_insert_flip_char(tty, ch, flag);
	} while (1);
}

static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
				      struct tty_port *tty,
				      unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}

	if (tup->uport.ignore_status_mask & UART_LSR_DR)
		return;

	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				   count, DMA_TO_DEVICE);
}

static void do_handle_rx_pio(struct tegra_uart_port *tup)
{
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}

static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
				      unsigned int residue)
{
	struct tty_port *port = &tup->uport.state->port;
	unsigned int count;

	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - residue;

	/* If we are here, DMA is stopped */
	tegra_uart_copy_rx_to_tty(tup, port, count);

	do_handle_rx_pio(tup);
}

static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);

	if (status == DMA_IN_PROGRESS) {
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tup->rx_dma_active = false;
	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	spin_unlock_irqrestore(&u->lock, flags);
}

static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	if (!tup->rx_dma_active) {
		do_handle_rx_pio(tup);
		return;
	}

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);

	tegra_uart_rx_buffer_push(tup, state.residue);
	tup->rx_dma_active = false;
}

static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

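	/* Stop any in-flight RX DMA and push the received data to the TTY layer. */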
	tegra_uart_terminate_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}

static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	if (tup->rx_dma_active)
		return 0;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_active = true;
	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}

static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;
	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}

static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_start = false;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (!tup->use_rx_pio && is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD | UART_IER_RDI);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			} else if (is_rx_start) {
				tegra_uart_start_rx_dma(tup);
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
			if (!tup->use_rx_pio) {
				is_rx_int = tup->rx_in_progress;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
				break;
			}
			/* Fall through */
		case 2: /* Receive */
			if (!tup->use_rx_pio) {
				is_rx_start = tup->rx_in_progress;
				tup->ier_shadow &= ~UART_IER_RDI;
				tegra_uart_write(tup, tup->ier_shadow,
						 UART_IER);
			} else {
				do_handle_rx_pio(tup);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break; nothing to handle */
		case 7: /* break; nothing to handle */
			break;
		}
	}
}

static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_port *port = &tup->uport.state->port;
	unsigned long ier;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
		 TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;

	if (!tup->use_rx_pio)
		tegra_uart_terminate_rx_dma(tup);
	else
		tegra_uart_handle_rx_pio(tup, port);
}

static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
				    (msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);

	clk_disable_unprepare(tup->uart_clk);
}

static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
	int ret;

	tup->fcr_shadow = 0;
	tup->mcr_shadow = 0;
	tup->lcr_shadow = 0;
	tup->ier_shadow = 0;
	tup->current_baud = 0;

	clk_prepare_enable(tup->uart_clk);

	/* Reset the UART controller to clear all previous status. */
	reset_control_assert(tup->rst);
	udelay(10);
	reset_control_deassert(tup->rst);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after that many bytes
	 * have been received; the remaining bytes are collected via the
	 * receive timeout interrupt. Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches
	 * the low watermark. Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16.
	 * This should match the DMA burst size programmed in the DMA
	 * registers.
	 */
	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;

	if (tup->use_rx_pio) {
		tup->fcr_shadow |= UART_FCR_R_TRIG_11;
	} else {
		if (tup->cdata->max_dma_burst_bytes == 8)
			tup->fcr_shadow |= UART_FCR_R_TRIG_10;
		else
			tup->fcr_shadow |= UART_FCR_R_TRIG_01;
	}

	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	if (tup->cdata->fifo_mode_enable_status) {
		ret = tegra_uart_wait_fifo_mode_enabled(tup);
		if (ret < 0) {
			dev_err(tup->uport.dev, "FIFO mode not enabled\n");
			return ret;
		}
	} else {
		/*
		 * For all tegra devices (up to t210), there is a hardware
		 * issue that requires software to wait for 3 UART clock
		 * periods after enabling the TX fifo, otherwise data could
		 * be lost.
		 */
		tegra_uart_wait_cycle_time(tup, 3);
	}

	/*
	 * Initialize the UART with default configuration
	 * (115200, N, 8, 1) so that the receive DMA buffer may be
	 * enqueued.
	 */
	ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return ret;
	}
	if (!tup->use_rx_pio) {
		tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
		tup->fcr_shadow |= UART_FCR_DMA_SELECT;
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
	} else {
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
	}
	tup->rx_in_progress = 1;

	/*
	 * Enable IE_RXS for the receive status interrupts like line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
	 * when data is sitting in the FIFO and couldn't be transferred to
	 * the DMA because the DMA size alignment (4 bytes) is not met. EORD
	 * is triggered when there is a pause in the incoming data stream
	 * that is 4 character times long.
	 *
	 * For pauses in the data which are not aligned to 4 bytes, we get
	 * both the EORD and RX_TIMEOUT - SW sees RX_TIMEOUT first, then
	 * the EORD.
	 */
	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;

	/*
	 * If using DMA mode, enable EORD interrupt to notify about RX
	 * completion.
	 */
	if (!tup->use_rx_pio)
		tup->ier_shadow |= TEGRA_UART_IER_EORD;

	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	return 0;
}

static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
		bool dma_to_memory)
{
	if (dma_to_memory) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dma_release_channel(tup->rx_dma_chan);
		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
				  tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
		tup->rx_dma_chan = NULL;
		tup->rx_dma_buf_phys = 0;
		tup->rx_dma_buf_virt = NULL;
	} else {
		dmaengine_terminate_all(tup->tx_dma_chan);
		dma_release_channel(tup->tx_dma_chan);
		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
				 UART_XMIT_SIZE, DMA_TO_DEVICE);
		tup->tx_dma_chan = NULL;
		tup->tx_dma_buf_phys = 0;
		tup->tx_dma_buf_virt = NULL;
	}
}

static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		dev_err(tup->uport.dev,
			"DMA channel alloc failed: %d\n", ret);
		return ret;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				&dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_sync_single_for_device(tup->uport.dev, dma_phys,
					   TEGRA_UART_RX_DMA_BUFFER_SIZE,
					   DMA_TO_DEVICE);
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
				tup->uport.state->xmit.buf, UART_XMIT_SIZE,
				DMA_TO_DEVICE);
		if (dma_mapping_error(tup->uport.dev, dma_phys)) {
			dev_err(tup->uport.dev, "dma_map_single tx failed\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_buf = tup->uport.state->xmit.buf;
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		tegra_uart_dma_channel_free(tup, dma_to_memory);
		return ret;
	}

	return 0;
}

static int tegra_uart_startup(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int ret;

	if (!tup->use_tx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, false);
		if (ret < 0) {
			dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
				ret);
			return ret;
		}
	}

	if (!tup->use_rx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, true);
		if (ret < 0) {
			dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
				ret);
			goto fail_rx_dma;
		}
	}

	ret = tegra_uart_hw_init(tup);
	if (ret < 0) {
		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
		goto fail_hw_init;
	}

	ret = request_irq(u->irq, tegra_uart_isr, 0,
			  dev_name(u->dev), tup);
	if (ret < 0) {
		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
		goto fail_hw_init;
	}
	return 0;

fail_hw_init:
	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);
	return ret;
}

/*
 * Flush any TX data submitted for DMA and PIO. Called when the
 * TX circular buffer is reset.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tup->tx_bytes = 0;
	if (tup->tx_dma_chan)
		dmaengine_terminate_all(tup->tx_dma_chan);
}

static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);
	free_irq(u->irq, tup);
}

static void tegra_uart_enable_ms(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	if (tup->enable_modem_interrupt) {
		tup->ier_shadow |= UART_IER_MSI;
		tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	}
}

static void tegra_uart_set_termios(struct uart_port *u,
				   struct ktermios *termios,
				   struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
	int ret;

	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	spin_unlock_irqrestore(&u->lock, flags);
	ret = tegra_set_baudrate(tup, baud);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return;
	}
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupt */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);

	tup->uport.ignore_status_mask = 0;
	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		tup->uport.ignore_status_mask |= UART_LSR_DR;
	if (termios->c_iflag & IGNBRK)
		tup->uport.ignore_status_mask |= UART_LSR_BI;

	spin_unlock_irqrestore(&u->lock, flags);
}

static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}

static const struct uart_ops tegra_uart_ops = {
	.tx_empty	= tegra_uart_tx_empty,
	.set_mctrl	= tegra_uart_set_mctrl,
	.get_mctrl	= tegra_uart_get_mctrl,
	.stop_tx	= tegra_uart_stop_tx,
	.start_tx	= tegra_uart_start_tx,
	.stop_rx	= tegra_uart_stop_rx,
	.flush_buffer	= tegra_uart_flush_buffer,
	.enable_ms	= tegra_uart_enable_ms,
	.break_ctl	= tegra_uart_break_ctl,
	.startup	= tegra_uart_startup,
	.shutdown	= tegra_uart_shutdown,
	.set_termios	= tegra_uart_set_termios,
	.type		= tegra_uart_type,
	.request_port	= tegra_uart_request_port,
	.release_port	= tegra_uart_release_port,
};

static struct uart_driver tegra_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "tegra_hsuart",
	.dev_name	= "ttyTHS",
	.cons		= NULL,
	.nr		= TEGRA_UART_MAXIMUM,
};

static int tegra_uart_parse_dt(struct platform_device *pdev,
	struct tegra_uart_port *tup)
{
	struct device_node *np = pdev->dev.of_node;
	int port;
	int ret;
	int index;
	u32 pval;
	int count;
	int n_entries;

	port = of_alias_get_id(np, "serial");
	if (port < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
		return port;
	}
	tup->uport.line = port;

	tup->enable_modem_interrupt = of_property_read_bool(np,
					"nvidia,enable-modem-interrupt");

	index = of_property_match_string(np, "dma-names", "rx");
	if (index < 0) {
		tup->use_rx_pio = true;
		dev_info(&pdev->dev, "RX in PIO mode\n");
	}
	index = of_property_match_string(np, "dma-names", "tx");
	if (index < 0) {
		tup->use_tx_pio = true;
		dev_info(&pdev->dev, "TX in PIO mode\n");
	}

	n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
	if (n_entries > 0) {
		tup->n_adjustable_baud_rates = n_entries / 3;
		tup->baud_tolerance =
			devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
				     sizeof(*tup->baud_tolerance), GFP_KERNEL);
		if (!tup->baud_tolerance)
			return -ENOMEM;
		for (count = 0, index = 0; count < n_entries;
		     count += 3, index++) {
			ret = of_property_read_u32_index(np,
						"nvidia,adjust-baud-rates",
						count, &pval);
			if (!ret)
				tup->baud_tolerance[index].lower_range_baud =
					pval;
			ret = of_property_read_u32_index(np,
						"nvidia,adjust-baud-rates",
						count + 1, &pval);
			if (!ret)
				tup->baud_tolerance[index].upper_range_baud =
					pval;
			ret = of_property_read_u32_index(np,
						"nvidia,adjust-baud-rates",
						count + 2, &pval);
			if (!ret)
				tup->baud_tolerance[index].tolerance =
					(s32)pval;
		}
	} else {
		tup->n_adjustable_baud_rates = 0;
	}

	return 0;
}

static struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status		= false,
	.allow_txfifo_reset_fifo_mode	= true,
	.support_clk_src_div		= false,
	.fifo_mode_enable_status	= false,
	.uart_max_port			= 5,
	.max_dma_burst_bytes		= 4,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

static struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= false,
	.uart_max_port			= 5,
	.max_dma_burst_bytes		= 4,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

static struct tegra_uart_chip_data tegra186_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= true,
	.uart_max_port			= 8,
	.max_dma_burst_bytes		= 8,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

static struct tegra_uart_chip_data tegra194_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= true,
	.uart_max_port			= 8,
	.max_dma_burst_bytes		= 8,
	.error_tolerance_low_range	= -2,
	.error_tolerance_high_range	= 2,
};

static const struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible	= "nvidia,tegra30-hsuart",
		.data		= &tegra30_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra20-hsuart",
		.data		= &tegra20_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra186-hsuart",
		.data		= &tegra186_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra194-hsuart",
		.data		= &tegra194_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);

static int tegra_uart_probe(struct platform_device *pdev)
{
	struct tegra_uart_port *tup;
	struct uart_port *u;
	struct resource *resource;
	int ret;
	const struct tegra_uart_chip_data *cdata;
	const struct of_device_id *match;

	match = of_match_device(tegra_uart_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
	if (!tup) {
		dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
		return -ENOMEM;
	}

	ret = tegra_uart_parse_dt(pdev, tup);
	if (ret < 0)
		return ret;

	u = &tup->uport;
	u->dev = &pdev->dev;
	u->ops = &tegra_uart_ops;
	u->type = PORT_TEGRA;
	u->fifosize = 32;
	tup->cdata = cdata;

	platform_set_drvdata(pdev, tup);
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		return -ENODEV;
	}

	u->mapbase = resource->start;
	u->membase = devm_ioremap_resource(&pdev->dev, resource);
	if (IS_ERR(u->membase))
		return PTR_ERR(u->membase);

	tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tup->uart_clk)) {
		dev_err(&pdev->dev, "Couldn't get the clock\n");
		return PTR_ERR(tup->uart_clk);
	}

	tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
	if (IS_ERR(tup->rst)) {
		dev_err(&pdev->dev, "Couldn't get the reset\n");
		return PTR_ERR(tup->rst);
	}

	u->iotype = UPIO_MEM32;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	u->irq = ret;
	u->regshift = 2;
	ret = uart_add_one_port(&tegra_uart_driver, u);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
		return ret;
	}
	return ret;
}

static int tegra_uart_remove(struct platform_device *pdev)
{
	struct tegra_uart_port *tup = platform_get_drvdata(pdev);
	struct uart_port *u = &tup->uport;

	uart_remove_one_port(&tegra_uart_driver, u);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_suspend_port(&tegra_uart_driver, u);
}

static int tegra_uart_resume(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_resume_port(&tegra_uart_driver, u);
}
#endif

static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};

static struct platform_driver tegra_uart_platform_driver = {
	.probe		= tegra_uart_probe,
	.remove		= tegra_uart_remove,
	.driver		= {
		.name		= "serial-tegra",
		.of_match_table	= tegra_uart_of_match,
		.pm		= &tegra_uart_pm_ops,
	},
};

static int __init tegra_uart_init(void)
{
	int ret;
	struct device_node *node;
	const struct of_device_id *match = NULL;
	const struct tegra_uart_chip_data *cdata = NULL;

	node = of_find_matching_node(NULL, tegra_uart_of_match);
	if (node)
		match = of_match_node(tegra_uart_of_match, node);
	if (match)
		cdata = match->data;
	if (cdata)
		tegra_uart_driver.nr = cdata->uart_max_port;

	ret = uart_register_driver(&tegra_uart_driver);
	if (ret < 0) {
		pr_err("Could not register %s driver\n",
		       tegra_uart_driver.driver_name);
		return ret;
	}

	ret = platform_driver_register(&tegra_uart_platform_driver);
	if (ret < 0) {
		pr_err("Uart platform driver register failed, e = %d\n", ret);
		uart_unregister_driver(&tegra_uart_driver);
		return ret;
	}
	return 0;
}

static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}

module_init(tegra_uart_init);
module_exit(tegra_uart_exit);

MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");