1 /* 2 * Driver for Atmel AT91 / AT32 Serial ports 3 * Copyright (C) 2003 Rick Bronson 4 * 5 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd. 6 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 7 * 8 * DMA support added by Chip Coldwell. 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * 24 */ 25 #include <linux/module.h> 26 #include <linux/tty.h> 27 #include <linux/ioport.h> 28 #include <linux/slab.h> 29 #include <linux/init.h> 30 #include <linux/serial.h> 31 #include <linux/clk.h> 32 #include <linux/console.h> 33 #include <linux/sysrq.h> 34 #include <linux/tty_flip.h> 35 #include <linux/platform_device.h> 36 #include <linux/of.h> 37 #include <linux/of_device.h> 38 #include <linux/of_gpio.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/dmaengine.h> 41 #include <linux/atmel_pdc.h> 42 #include <linux/atmel_serial.h> 43 #include <linux/uaccess.h> 44 #include <linux/platform_data/atmel.h> 45 #include <linux/timer.h> 46 #include <linux/gpio.h> 47 #include <linux/gpio/consumer.h> 48 #include <linux/err.h> 49 #include <linux/irq.h> 50 #include <linux/suspend.h> 51 52 #include <asm/io.h> 53 #include <asm/ioctls.h> 54 55 #define PDC_BUFFER_SIZE 512 56 /* Revisit: We should calculate this based on the actual port settings */ 57 #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */ 58 59 /* The minium number of data FIFOs should be able to contain */ 60 #define ATMEL_MIN_FIFO_SIZE 8 61 /* 62 * These two offsets are substracted from the RX FIFO size to define the RTS 63 * high and low thresholds 64 */ 65 #define ATMEL_RTS_HIGH_OFFSET 16 66 #define ATMEL_RTS_LOW_OFFSET 20 67 68 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 69 #define SUPPORT_SYSRQ 70 #endif 71 72 #include <linux/serial_core.h> 73 74 #include "serial_mctrl_gpio.h" 75 76 static void atmel_start_rx(struct uart_port *port); 77 static void atmel_stop_rx(struct uart_port *port); 78 79 #ifdef CONFIG_SERIAL_ATMEL_TTYAT 80 81 /* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we 82 * should coexist with the 8250 driver, such as if we have an external 16C550 83 * UART. */ 84 #define SERIAL_ATMEL_MAJOR 204 85 #define MINOR_START 154 86 #define ATMEL_DEVICENAME "ttyAT" 87 88 #else 89 90 /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port 91 * name, but it is legally reserved for the 8250 driver. 
*/ 92 #define SERIAL_ATMEL_MAJOR TTY_MAJOR 93 #define MINOR_START 64 94 #define ATMEL_DEVICENAME "ttyS" 95 96 #endif 97 98 #define ATMEL_ISR_PASS_LIMIT 256 99 100 struct atmel_dma_buffer { 101 unsigned char *buf; 102 dma_addr_t dma_addr; 103 unsigned int dma_size; 104 unsigned int ofs; 105 }; 106 107 struct atmel_uart_char { 108 u16 status; 109 u16 ch; 110 }; 111 112 #define ATMEL_SERIAL_RINGSIZE 1024 113 114 /* 115 * We wrap our port structure around the generic uart_port. 116 */ 117 struct atmel_uart_port { 118 struct uart_port uart; /* uart */ 119 struct clk *clk; /* uart clock */ 120 int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */ 121 u32 backup_imr; /* IMR saved during suspend */ 122 int break_active; /* break being received */ 123 124 bool use_dma_rx; /* enable DMA receiver */ 125 bool use_pdc_rx; /* enable PDC receiver */ 126 short pdc_rx_idx; /* current PDC RX buffer */ 127 struct atmel_dma_buffer pdc_rx[2]; /* PDC receier */ 128 129 bool use_dma_tx; /* enable DMA transmitter */ 130 bool use_pdc_tx; /* enable PDC transmitter */ 131 struct atmel_dma_buffer pdc_tx; /* PDC transmitter */ 132 133 spinlock_t lock_tx; /* port lock */ 134 spinlock_t lock_rx; /* port lock */ 135 struct dma_chan *chan_tx; 136 struct dma_chan *chan_rx; 137 struct dma_async_tx_descriptor *desc_tx; 138 struct dma_async_tx_descriptor *desc_rx; 139 dma_cookie_t cookie_tx; 140 dma_cookie_t cookie_rx; 141 struct scatterlist sg_tx; 142 struct scatterlist sg_rx; 143 struct tasklet_struct tasklet; 144 unsigned int irq_status; 145 unsigned int irq_status_prev; 146 unsigned int status_change; 147 unsigned int tx_len; 148 149 struct circ_buf rx_ring; 150 151 struct mctrl_gpios *gpios; 152 int gpio_irq[UART_GPIO_MAX]; 153 unsigned int tx_done_mask; 154 u32 fifo_size; 155 u32 rts_high; 156 u32 rts_low; 157 bool ms_irq_enabled; 158 bool is_usart; /* usart or uart */ 159 struct timer_list uart_timer; /* uart timer */ 160 161 bool suspended; 162 unsigned int pending; 163 unsigned int pending_status; 164 spinlock_t lock_suspended; 165 166 int (*prepare_rx)(struct uart_port *port); 167 int (*prepare_tx)(struct uart_port *port); 168 void (*schedule_rx)(struct uart_port *port); 169 void (*schedule_tx)(struct uart_port *port); 170 void (*release_rx)(struct uart_port *port); 171 void (*release_tx)(struct uart_port *port); 172 }; 173 174 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; 175 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART); 176 177 #ifdef SUPPORT_SYSRQ 178 static struct console atmel_console; 179 #endif 180 181 #if defined(CONFIG_OF) 182 static const struct of_device_id atmel_serial_dt_ids[] = { 183 { .compatible = "atmel,at91rm9200-usart" }, 184 { .compatible = "atmel,at91sam9260-usart" }, 185 { /* sentinel */ } 186 }; 187 188 MODULE_DEVICE_TABLE(of, atmel_serial_dt_ids); 189 #endif 190 191 static inline struct atmel_uart_port * 192 to_atmel_uart_port(struct uart_port *uart) 193 { 194 return container_of(uart, struct atmel_uart_port, uart); 195 } 196 197 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg) 198 { 199 return __raw_readl(port->membase + reg); 200 } 201 202 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value) 203 { 204 __raw_writel(value, port->membase + reg); 205 } 206 207 #ifdef CONFIG_AVR32 208 209 /* AVR32 cannot handle 8 or 16bit I/O accesses but only 32bit I/O accesses */ 210 static inline u8 atmel_uart_read_char(struct uart_port *port) 211 { 212 return __raw_readl(port->membase + ATMEL_US_RHR); 
213 } 214 215 static inline void atmel_uart_write_char(struct uart_port *port, u8 value) 216 { 217 __raw_writel(value, port->membase + ATMEL_US_THR); 218 } 219 220 #else 221 222 static inline u8 atmel_uart_read_char(struct uart_port *port) 223 { 224 return __raw_readb(port->membase + ATMEL_US_RHR); 225 } 226 227 static inline void atmel_uart_write_char(struct uart_port *port, u8 value) 228 { 229 __raw_writeb(value, port->membase + ATMEL_US_THR); 230 } 231 232 #endif 233 234 #ifdef CONFIG_SERIAL_ATMEL_PDC 235 static bool atmel_use_pdc_rx(struct uart_port *port) 236 { 237 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 238 239 return atmel_port->use_pdc_rx; 240 } 241 242 static bool atmel_use_pdc_tx(struct uart_port *port) 243 { 244 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 245 246 return atmel_port->use_pdc_tx; 247 } 248 #else 249 static bool atmel_use_pdc_rx(struct uart_port *port) 250 { 251 return false; 252 } 253 254 static bool atmel_use_pdc_tx(struct uart_port *port) 255 { 256 return false; 257 } 258 #endif 259 260 static bool atmel_use_dma_tx(struct uart_port *port) 261 { 262 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 263 264 return atmel_port->use_dma_tx; 265 } 266 267 static bool atmel_use_dma_rx(struct uart_port *port) 268 { 269 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 270 271 return atmel_port->use_dma_rx; 272 } 273 274 static unsigned int atmel_get_lines_status(struct uart_port *port) 275 { 276 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 277 unsigned int status, ret = 0; 278 279 status = atmel_uart_readl(port, ATMEL_US_CSR); 280 281 mctrl_gpio_get(atmel_port->gpios, &ret); 282 283 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 284 UART_GPIO_CTS))) { 285 if (ret & TIOCM_CTS) 286 status &= ~ATMEL_US_CTS; 287 else 288 status |= ATMEL_US_CTS; 289 } 290 291 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 292 UART_GPIO_DSR))) { 293 if (ret & TIOCM_DSR) 294 status &= ~ATMEL_US_DSR; 295 else 296 status |= ATMEL_US_DSR; 297 } 298 299 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 300 UART_GPIO_RI))) { 301 if (ret & TIOCM_RI) 302 status &= ~ATMEL_US_RI; 303 else 304 status |= ATMEL_US_RI; 305 } 306 307 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 308 UART_GPIO_DCD))) { 309 if (ret & TIOCM_CD) 310 status &= ~ATMEL_US_DCD; 311 else 312 status |= ATMEL_US_DCD; 313 } 314 315 return status; 316 } 317 318 /* Enable or disable the rs485 support */ 319 static int atmel_config_rs485(struct uart_port *port, 320 struct serial_rs485 *rs485conf) 321 { 322 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 323 unsigned int mode; 324 325 /* Disable interrupts */ 326 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 327 328 mode = atmel_uart_readl(port, ATMEL_US_MR); 329 330 /* Resetting serial mode to RS232 (0x0) */ 331 mode &= ~ATMEL_US_USMODE; 332 333 port->rs485 = *rs485conf; 334 335 if (rs485conf->flags & SER_RS485_ENABLED) { 336 dev_dbg(port->dev, "Setting UART to RS485\n"); 337 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 338 atmel_uart_writel(port, ATMEL_US_TTGR, 339 rs485conf->delay_rts_after_send); 340 mode |= ATMEL_US_USMODE_RS485; 341 } else { 342 dev_dbg(port->dev, "Setting UART to RS232\n"); 343 if (atmel_use_pdc_tx(port)) 344 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 345 ATMEL_US_TXBUFE; 346 else 347 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 348 } 349 atmel_uart_writel(port, ATMEL_US_MR, mode); 350 351 /* Enable 
interrupts */ 352 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 353 354 return 0; 355 } 356 357 /* 358 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. 359 */ 360 static u_int atmel_tx_empty(struct uart_port *port) 361 { 362 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ? 363 TIOCSER_TEMT : 364 0; 365 } 366 367 /* 368 * Set state of the modem control output lines 369 */ 370 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) 371 { 372 unsigned int control = 0; 373 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR); 374 unsigned int rts_paused, rts_ready; 375 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 376 377 /* override mode to RS485 if needed, otherwise keep the current mode */ 378 if (port->rs485.flags & SER_RS485_ENABLED) { 379 atmel_uart_writel(port, ATMEL_US_TTGR, 380 port->rs485.delay_rts_after_send); 381 mode &= ~ATMEL_US_USMODE; 382 mode |= ATMEL_US_USMODE_RS485; 383 } 384 385 /* set the RTS line state according to the mode */ 386 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 387 /* force RTS line to high level */ 388 rts_paused = ATMEL_US_RTSEN; 389 390 /* give the control of the RTS line back to the hardware */ 391 rts_ready = ATMEL_US_RTSDIS; 392 } else { 393 /* force RTS line to high level */ 394 rts_paused = ATMEL_US_RTSDIS; 395 396 /* force RTS line to low level */ 397 rts_ready = ATMEL_US_RTSEN; 398 } 399 400 if (mctrl & TIOCM_RTS) 401 control |= rts_ready; 402 else 403 control |= rts_paused; 404 405 if (mctrl & TIOCM_DTR) 406 control |= ATMEL_US_DTREN; 407 else 408 control |= ATMEL_US_DTRDIS; 409 410 atmel_uart_writel(port, ATMEL_US_CR, control); 411 412 mctrl_gpio_set(atmel_port->gpios, mctrl); 413 414 /* Local loopback mode? */ 415 mode &= ~ATMEL_US_CHMODE; 416 if (mctrl & TIOCM_LOOP) 417 mode |= ATMEL_US_CHMODE_LOC_LOOP; 418 else 419 mode |= ATMEL_US_CHMODE_NORMAL; 420 421 atmel_uart_writel(port, ATMEL_US_MR, mode); 422 } 423 424 /* 425 * Get state of the modem control input lines 426 */ 427 static u_int atmel_get_mctrl(struct uart_port *port) 428 { 429 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 430 unsigned int ret = 0, status; 431 432 status = atmel_uart_readl(port, ATMEL_US_CSR); 433 434 /* 435 * The control signals are active low. 436 */ 437 if (!(status & ATMEL_US_DCD)) 438 ret |= TIOCM_CD; 439 if (!(status & ATMEL_US_CTS)) 440 ret |= TIOCM_CTS; 441 if (!(status & ATMEL_US_DSR)) 442 ret |= TIOCM_DSR; 443 if (!(status & ATMEL_US_RI)) 444 ret |= TIOCM_RI; 445 446 return mctrl_gpio_get(atmel_port->gpios, &ret); 447 } 448 449 /* 450 * Stop transmitting. 451 */ 452 static void atmel_stop_tx(struct uart_port *port) 453 { 454 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 455 456 if (atmel_use_pdc_tx(port)) { 457 /* disable PDC transmit */ 458 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 459 } 460 /* Disable interrupts */ 461 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 462 463 if ((port->rs485.flags & SER_RS485_ENABLED) && 464 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) 465 atmel_start_rx(port); 466 } 467 468 /* 469 * Start transmitting. 470 */ 471 static void atmel_start_tx(struct uart_port *port) 472 { 473 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 474 475 if (atmel_use_pdc_tx(port)) { 476 if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN) 477 /* The transmitter is already running. 
Yes, we 478 really need this.*/ 479 return; 480 481 if ((port->rs485.flags & SER_RS485_ENABLED) && 482 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) 483 atmel_stop_rx(port); 484 485 /* re-enable PDC transmit */ 486 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 487 } 488 /* Enable interrupts */ 489 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 490 } 491 492 /* 493 * start receiving - port is in process of being opened. 494 */ 495 static void atmel_start_rx(struct uart_port *port) 496 { 497 /* reset status and receiver */ 498 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 499 500 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN); 501 502 if (atmel_use_pdc_rx(port)) { 503 /* enable PDC controller */ 504 atmel_uart_writel(port, ATMEL_US_IER, 505 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 506 port->read_status_mask); 507 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 508 } else { 509 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 510 } 511 } 512 513 /* 514 * Stop receiving - port is in process of being closed. 515 */ 516 static void atmel_stop_rx(struct uart_port *port) 517 { 518 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS); 519 520 if (atmel_use_pdc_rx(port)) { 521 /* disable PDC receive */ 522 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS); 523 atmel_uart_writel(port, ATMEL_US_IDR, 524 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 525 port->read_status_mask); 526 } else { 527 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY); 528 } 529 } 530 531 /* 532 * Enable modem status interrupts 533 */ 534 static void atmel_enable_ms(struct uart_port *port) 535 { 536 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 537 uint32_t ier = 0; 538 539 /* 540 * Interrupt should not be enabled twice 541 */ 542 if (atmel_port->ms_irq_enabled) 543 return; 544 545 atmel_port->ms_irq_enabled = true; 546 547 if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0) 548 enable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]); 549 else 550 ier |= ATMEL_US_CTSIC; 551 552 if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0) 553 enable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]); 554 else 555 ier |= ATMEL_US_DSRIC; 556 557 if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0) 558 enable_irq(atmel_port->gpio_irq[UART_GPIO_RI]); 559 else 560 ier |= ATMEL_US_RIIC; 561 562 if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0) 563 enable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]); 564 else 565 ier |= ATMEL_US_DCDIC; 566 567 atmel_uart_writel(port, ATMEL_US_IER, ier); 568 } 569 570 /* 571 * Disable modem status interrupts 572 */ 573 static void atmel_disable_ms(struct uart_port *port) 574 { 575 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 576 uint32_t idr = 0; 577 578 /* 579 * Interrupt should not be disabled twice 580 */ 581 if (!atmel_port->ms_irq_enabled) 582 return; 583 584 atmel_port->ms_irq_enabled = false; 585 586 if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0) 587 disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]); 588 else 589 idr |= ATMEL_US_CTSIC; 590 591 if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0) 592 disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]); 593 else 594 idr |= ATMEL_US_DSRIC; 595 596 if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0) 597 disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]); 598 else 599 idr |= ATMEL_US_RIIC; 600 601 if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0) 602 disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]); 603 else 604 idr |= ATMEL_US_DCDIC; 605 606 atmel_uart_writel(port, ATMEL_US_IDR, idr); 607 } 608 609 /* 610 * Control the transmission of a 
break signal 611 */ 612 static void atmel_break_ctl(struct uart_port *port, int break_state) 613 { 614 if (break_state != 0) 615 /* start break */ 616 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK); 617 else 618 /* stop break */ 619 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK); 620 } 621 622 /* 623 * Stores the incoming character in the ring buffer 624 */ 625 static void 626 atmel_buffer_rx_char(struct uart_port *port, unsigned int status, 627 unsigned int ch) 628 { 629 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 630 struct circ_buf *ring = &atmel_port->rx_ring; 631 struct atmel_uart_char *c; 632 633 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) 634 /* Buffer overflow, ignore char */ 635 return; 636 637 c = &((struct atmel_uart_char *)ring->buf)[ring->head]; 638 c->status = status; 639 c->ch = ch; 640 641 /* Make sure the character is stored before we update head. */ 642 smp_wmb(); 643 644 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 645 } 646 647 /* 648 * Deal with parity, framing and overrun errors. 649 */ 650 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) 651 { 652 /* clear error */ 653 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 654 655 if (status & ATMEL_US_RXBRK) { 656 /* ignore side-effect */ 657 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 658 port->icount.brk++; 659 } 660 if (status & ATMEL_US_PARE) 661 port->icount.parity++; 662 if (status & ATMEL_US_FRAME) 663 port->icount.frame++; 664 if (status & ATMEL_US_OVRE) 665 port->icount.overrun++; 666 } 667 668 /* 669 * Characters received (called from interrupt handler) 670 */ 671 static void atmel_rx_chars(struct uart_port *port) 672 { 673 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 674 unsigned int status, ch; 675 676 status = atmel_uart_readl(port, ATMEL_US_CSR); 677 while (status & ATMEL_US_RXRDY) { 678 ch = atmel_uart_read_char(port); 679 680 /* 681 * note that the error handling code is 682 * out of the main execution path 683 */ 684 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 685 | ATMEL_US_OVRE | ATMEL_US_RXBRK) 686 || atmel_port->break_active)) { 687 688 /* clear error */ 689 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 690 691 if (status & ATMEL_US_RXBRK 692 && !atmel_port->break_active) { 693 atmel_port->break_active = 1; 694 atmel_uart_writel(port, ATMEL_US_IER, 695 ATMEL_US_RXBRK); 696 } else { 697 /* 698 * This is either the end-of-break 699 * condition or we've received at 700 * least one character without RXBRK 701 * being set. In both cases, the next 702 * RXBRK will indicate start-of-break. 
703 */ 704 atmel_uart_writel(port, ATMEL_US_IDR, 705 ATMEL_US_RXBRK); 706 status &= ~ATMEL_US_RXBRK; 707 atmel_port->break_active = 0; 708 } 709 } 710 711 atmel_buffer_rx_char(port, status, ch); 712 status = atmel_uart_readl(port, ATMEL_US_CSR); 713 } 714 715 tasklet_schedule(&atmel_port->tasklet); 716 } 717 718 /* 719 * Transmit characters (called from tasklet with TXRDY interrupt 720 * disabled) 721 */ 722 static void atmel_tx_chars(struct uart_port *port) 723 { 724 struct circ_buf *xmit = &port->state->xmit; 725 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 726 727 if (port->x_char && 728 (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) { 729 atmel_uart_write_char(port, port->x_char); 730 port->icount.tx++; 731 port->x_char = 0; 732 } 733 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) 734 return; 735 736 while (atmel_uart_readl(port, ATMEL_US_CSR) & 737 atmel_port->tx_done_mask) { 738 atmel_uart_write_char(port, xmit->buf[xmit->tail]); 739 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 740 port->icount.tx++; 741 if (uart_circ_empty(xmit)) 742 break; 743 } 744 745 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 746 uart_write_wakeup(port); 747 748 if (!uart_circ_empty(xmit)) 749 /* Enable interrupts */ 750 atmel_uart_writel(port, ATMEL_US_IER, 751 atmel_port->tx_done_mask); 752 } 753 754 static void atmel_complete_tx_dma(void *arg) 755 { 756 struct atmel_uart_port *atmel_port = arg; 757 struct uart_port *port = &atmel_port->uart; 758 struct circ_buf *xmit = &port->state->xmit; 759 struct dma_chan *chan = atmel_port->chan_tx; 760 unsigned long flags; 761 762 spin_lock_irqsave(&port->lock, flags); 763 764 if (chan) 765 dmaengine_terminate_all(chan); 766 xmit->tail += atmel_port->tx_len; 767 xmit->tail &= UART_XMIT_SIZE - 1; 768 769 port->icount.tx += atmel_port->tx_len; 770 771 spin_lock_irq(&atmel_port->lock_tx); 772 async_tx_ack(atmel_port->desc_tx); 773 atmel_port->cookie_tx = -EINVAL; 774 atmel_port->desc_tx = NULL; 775 spin_unlock_irq(&atmel_port->lock_tx); 776 777 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 778 uart_write_wakeup(port); 779 780 /* 781 * xmit is a circular buffer so, if we have just send data from 782 * xmit->tail to the end of xmit->buf, now we have to transmit the 783 * remaining data from the beginning of xmit->buf to xmit->head. 784 */ 785 if (!uart_circ_empty(xmit)) 786 tasklet_schedule(&atmel_port->tasklet); 787 788 spin_unlock_irqrestore(&port->lock, flags); 789 } 790 791 static void atmel_release_tx_dma(struct uart_port *port) 792 { 793 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 794 struct dma_chan *chan = atmel_port->chan_tx; 795 796 if (chan) { 797 dmaengine_terminate_all(chan); 798 dma_release_channel(chan); 799 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1, 800 DMA_TO_DEVICE); 801 } 802 803 atmel_port->desc_tx = NULL; 804 atmel_port->chan_tx = NULL; 805 atmel_port->cookie_tx = -EINVAL; 806 } 807 808 /* 809 * Called from tasklet with TXRDY interrupt is disabled. 
810 */ 811 static void atmel_tx_dma(struct uart_port *port) 812 { 813 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 814 struct circ_buf *xmit = &port->state->xmit; 815 struct dma_chan *chan = atmel_port->chan_tx; 816 struct dma_async_tx_descriptor *desc; 817 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx; 818 unsigned int tx_len, part1_len, part2_len, sg_len; 819 dma_addr_t phys_addr; 820 821 /* Make sure we have an idle channel */ 822 if (atmel_port->desc_tx != NULL) 823 return; 824 825 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 826 /* 827 * DMA is idle now. 828 * Port xmit buffer is already mapped, 829 * and it is one page... Just adjust 830 * offsets and lengths. Since it is a circular buffer, 831 * we have to transmit till the end, and then the rest. 832 * Take the port lock to get a 833 * consistent xmit buffer state. 834 */ 835 tx_len = CIRC_CNT_TO_END(xmit->head, 836 xmit->tail, 837 UART_XMIT_SIZE); 838 839 if (atmel_port->fifo_size) { 840 /* multi data mode */ 841 part1_len = (tx_len & ~0x3); /* DWORD access */ 842 part2_len = (tx_len & 0x3); /* BYTE access */ 843 } else { 844 /* single data (legacy) mode */ 845 part1_len = 0; 846 part2_len = tx_len; /* BYTE access only */ 847 } 848 849 sg_init_table(sgl, 2); 850 sg_len = 0; 851 phys_addr = sg_dma_address(sg_tx) + xmit->tail; 852 if (part1_len) { 853 sg = &sgl[sg_len++]; 854 sg_dma_address(sg) = phys_addr; 855 sg_dma_len(sg) = part1_len; 856 857 phys_addr += part1_len; 858 } 859 860 if (part2_len) { 861 sg = &sgl[sg_len++]; 862 sg_dma_address(sg) = phys_addr; 863 sg_dma_len(sg) = part2_len; 864 } 865 866 /* 867 * save tx_len so atmel_complete_tx_dma() will increase 868 * xmit->tail correctly 869 */ 870 atmel_port->tx_len = tx_len; 871 872 desc = dmaengine_prep_slave_sg(chan, 873 sgl, 874 sg_len, 875 DMA_MEM_TO_DEV, 876 DMA_PREP_INTERRUPT | 877 DMA_CTRL_ACK); 878 if (!desc) { 879 dev_err(port->dev, "Failed to send via dma!\n"); 880 return; 881 } 882 883 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE); 884 885 atmel_port->desc_tx = desc; 886 desc->callback = atmel_complete_tx_dma; 887 desc->callback_param = atmel_port; 888 atmel_port->cookie_tx = dmaengine_submit(desc); 889 890 } else { 891 if (port->rs485.flags & SER_RS485_ENABLED) { 892 /* DMA done, stop TX, start RX for RS485 */ 893 atmel_start_rx(port); 894 } 895 } 896 897 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 898 uart_write_wakeup(port); 899 } 900 901 static int atmel_prepare_tx_dma(struct uart_port *port) 902 { 903 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 904 dma_cap_mask_t mask; 905 struct dma_slave_config config; 906 int ret, nent; 907 908 dma_cap_zero(mask); 909 dma_cap_set(DMA_SLAVE, mask); 910 911 atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx"); 912 if (atmel_port->chan_tx == NULL) 913 goto chan_err; 914 dev_info(port->dev, "using %s for tx DMA transfers\n", 915 dma_chan_name(atmel_port->chan_tx)); 916 917 spin_lock_init(&atmel_port->lock_tx); 918 sg_init_table(&atmel_port->sg_tx, 1); 919 /* UART circular tx buffer is an aligned page. 
*/ 920 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); 921 sg_set_page(&atmel_port->sg_tx, 922 virt_to_page(port->state->xmit.buf), 923 UART_XMIT_SIZE, 924 (int)port->state->xmit.buf & ~PAGE_MASK); 925 nent = dma_map_sg(port->dev, 926 &atmel_port->sg_tx, 927 1, 928 DMA_TO_DEVICE); 929 930 if (!nent) { 931 dev_dbg(port->dev, "need to release resource of dma\n"); 932 goto chan_err; 933 } else { 934 dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, 935 sg_dma_len(&atmel_port->sg_tx), 936 port->state->xmit.buf, 937 sg_dma_address(&atmel_port->sg_tx)); 938 } 939 940 /* Configure the slave DMA */ 941 memset(&config, 0, sizeof(config)); 942 config.direction = DMA_MEM_TO_DEV; 943 config.dst_addr_width = (atmel_port->fifo_size) ? 944 DMA_SLAVE_BUSWIDTH_4_BYTES : 945 DMA_SLAVE_BUSWIDTH_1_BYTE; 946 config.dst_addr = port->mapbase + ATMEL_US_THR; 947 config.dst_maxburst = 1; 948 949 ret = dmaengine_slave_config(atmel_port->chan_tx, 950 &config); 951 if (ret) { 952 dev_err(port->dev, "DMA tx slave configuration failed\n"); 953 goto chan_err; 954 } 955 956 return 0; 957 958 chan_err: 959 dev_err(port->dev, "TX channel not available, switch to pio\n"); 960 atmel_port->use_dma_tx = 0; 961 if (atmel_port->chan_tx) 962 atmel_release_tx_dma(port); 963 return -EINVAL; 964 } 965 966 static void atmel_complete_rx_dma(void *arg) 967 { 968 struct uart_port *port = arg; 969 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 970 971 tasklet_schedule(&atmel_port->tasklet); 972 } 973 974 static void atmel_release_rx_dma(struct uart_port *port) 975 { 976 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 977 struct dma_chan *chan = atmel_port->chan_rx; 978 979 if (chan) { 980 dmaengine_terminate_all(chan); 981 dma_release_channel(chan); 982 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1, 983 DMA_FROM_DEVICE); 984 } 985 986 atmel_port->desc_rx = NULL; 987 atmel_port->chan_rx = NULL; 988 atmel_port->cookie_rx = -EINVAL; 989 } 990 991 static void atmel_rx_from_dma(struct uart_port *port) 992 { 993 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 994 struct tty_port *tport = &port->state->port; 995 struct circ_buf *ring = &atmel_port->rx_ring; 996 struct dma_chan *chan = atmel_port->chan_rx; 997 struct dma_tx_state state; 998 enum dma_status dmastat; 999 size_t count; 1000 1001 1002 /* Reset the UART timeout early so that we don't miss one */ 1003 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1004 dmastat = dmaengine_tx_status(chan, 1005 atmel_port->cookie_rx, 1006 &state); 1007 /* Restart a new tasklet if DMA status is error */ 1008 if (dmastat == DMA_ERROR) { 1009 dev_dbg(port->dev, "Get residue error, restart tasklet\n"); 1010 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1011 tasklet_schedule(&atmel_port->tasklet); 1012 return; 1013 } 1014 1015 /* CPU claims ownership of RX DMA buffer */ 1016 dma_sync_sg_for_cpu(port->dev, 1017 &atmel_port->sg_rx, 1018 1, 1019 DMA_FROM_DEVICE); 1020 1021 /* 1022 * ring->head points to the end of data already written by the DMA. 1023 * ring->tail points to the beginning of data to be read by the 1024 * framework. 1025 * The current transfer size should not be larger than the dma buffer 1026 * length. 
1027 */ 1028 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue; 1029 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx)); 1030 /* 1031 * At this point ring->head may point to the first byte right after the 1032 * last byte of the dma buffer: 1033 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx) 1034 * 1035 * However ring->tail must always points inside the dma buffer: 1036 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1 1037 * 1038 * Since we use a ring buffer, we have to handle the case 1039 * where head is lower than tail. In such a case, we first read from 1040 * tail to the end of the buffer then reset tail. 1041 */ 1042 if (ring->head < ring->tail) { 1043 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail; 1044 1045 tty_insert_flip_string(tport, ring->buf + ring->tail, count); 1046 ring->tail = 0; 1047 port->icount.rx += count; 1048 } 1049 1050 /* Finally we read data from tail to head */ 1051 if (ring->tail < ring->head) { 1052 count = ring->head - ring->tail; 1053 1054 tty_insert_flip_string(tport, ring->buf + ring->tail, count); 1055 /* Wrap ring->head if needed */ 1056 if (ring->head >= sg_dma_len(&atmel_port->sg_rx)) 1057 ring->head = 0; 1058 ring->tail = ring->head; 1059 port->icount.rx += count; 1060 } 1061 1062 /* USART retreives ownership of RX DMA buffer */ 1063 dma_sync_sg_for_device(port->dev, 1064 &atmel_port->sg_rx, 1065 1, 1066 DMA_FROM_DEVICE); 1067 1068 /* 1069 * Drop the lock here since it might end up calling 1070 * uart_start(), which takes the lock. 1071 */ 1072 spin_unlock(&port->lock); 1073 tty_flip_buffer_push(tport); 1074 spin_lock(&port->lock); 1075 1076 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1077 } 1078 1079 static int atmel_prepare_rx_dma(struct uart_port *port) 1080 { 1081 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1082 struct dma_async_tx_descriptor *desc; 1083 dma_cap_mask_t mask; 1084 struct dma_slave_config config; 1085 struct circ_buf *ring; 1086 int ret, nent; 1087 1088 ring = &atmel_port->rx_ring; 1089 1090 dma_cap_zero(mask); 1091 dma_cap_set(DMA_CYCLIC, mask); 1092 1093 atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx"); 1094 if (atmel_port->chan_rx == NULL) 1095 goto chan_err; 1096 dev_info(port->dev, "using %s for rx DMA transfers\n", 1097 dma_chan_name(atmel_port->chan_rx)); 1098 1099 spin_lock_init(&atmel_port->lock_rx); 1100 sg_init_table(&atmel_port->sg_rx, 1); 1101 /* UART circular rx buffer is an aligned page. 
*/ 1102 BUG_ON(!PAGE_ALIGNED(ring->buf)); 1103 sg_set_page(&atmel_port->sg_rx, 1104 virt_to_page(ring->buf), 1105 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, 1106 (int)ring->buf & ~PAGE_MASK); 1107 nent = dma_map_sg(port->dev, 1108 &atmel_port->sg_rx, 1109 1, 1110 DMA_FROM_DEVICE); 1111 1112 if (!nent) { 1113 dev_dbg(port->dev, "need to release resource of dma\n"); 1114 goto chan_err; 1115 } else { 1116 dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, 1117 sg_dma_len(&atmel_port->sg_rx), 1118 ring->buf, 1119 sg_dma_address(&atmel_port->sg_rx)); 1120 } 1121 1122 /* Configure the slave DMA */ 1123 memset(&config, 0, sizeof(config)); 1124 config.direction = DMA_DEV_TO_MEM; 1125 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1126 config.src_addr = port->mapbase + ATMEL_US_RHR; 1127 config.src_maxburst = 1; 1128 1129 ret = dmaengine_slave_config(atmel_port->chan_rx, 1130 &config); 1131 if (ret) { 1132 dev_err(port->dev, "DMA rx slave configuration failed\n"); 1133 goto chan_err; 1134 } 1135 /* 1136 * Prepare a cyclic dma transfer, assign 2 descriptors, 1137 * each one is half ring buffer size 1138 */ 1139 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx, 1140 sg_dma_address(&atmel_port->sg_rx), 1141 sg_dma_len(&atmel_port->sg_rx), 1142 sg_dma_len(&atmel_port->sg_rx)/2, 1143 DMA_DEV_TO_MEM, 1144 DMA_PREP_INTERRUPT); 1145 desc->callback = atmel_complete_rx_dma; 1146 desc->callback_param = port; 1147 atmel_port->desc_rx = desc; 1148 atmel_port->cookie_rx = dmaengine_submit(desc); 1149 1150 return 0; 1151 1152 chan_err: 1153 dev_err(port->dev, "RX channel not available, switch to pio\n"); 1154 atmel_port->use_dma_rx = 0; 1155 if (atmel_port->chan_rx) 1156 atmel_release_rx_dma(port); 1157 return -EINVAL; 1158 } 1159 1160 static void atmel_uart_timer_callback(unsigned long data) 1161 { 1162 struct uart_port *port = (void *)data; 1163 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1164 1165 tasklet_schedule(&atmel_port->tasklet); 1166 mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port)); 1167 } 1168 1169 /* 1170 * receive interrupt handler. 1171 */ 1172 static void 1173 atmel_handle_receive(struct uart_port *port, unsigned int pending) 1174 { 1175 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1176 1177 if (atmel_use_pdc_rx(port)) { 1178 /* 1179 * PDC receive. Just schedule the tasklet and let it 1180 * figure out the details. 1181 * 1182 * TODO: We're not handling error flags correctly at 1183 * the moment. 1184 */ 1185 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { 1186 atmel_uart_writel(port, ATMEL_US_IDR, 1187 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)); 1188 tasklet_schedule(&atmel_port->tasklet); 1189 } 1190 1191 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | 1192 ATMEL_US_FRAME | ATMEL_US_PARE)) 1193 atmel_pdc_rxerr(port, pending); 1194 } 1195 1196 if (atmel_use_dma_rx(port)) { 1197 if (pending & ATMEL_US_TIMEOUT) { 1198 atmel_uart_writel(port, ATMEL_US_IDR, 1199 ATMEL_US_TIMEOUT); 1200 tasklet_schedule(&atmel_port->tasklet); 1201 } 1202 } 1203 1204 /* Interrupt receive */ 1205 if (pending & ATMEL_US_RXRDY) 1206 atmel_rx_chars(port); 1207 else if (pending & ATMEL_US_RXBRK) { 1208 /* 1209 * End of break detected. If it came along with a 1210 * character, atmel_rx_chars will handle it. 1211 */ 1212 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 1213 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK); 1214 atmel_port->break_active = 0; 1215 } 1216 } 1217 1218 /* 1219 * transmit interrupt handler. 
(Transmit is IRQF_NODELAY safe) 1220 */ 1221 static void 1222 atmel_handle_transmit(struct uart_port *port, unsigned int pending) 1223 { 1224 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1225 1226 if (pending & atmel_port->tx_done_mask) { 1227 /* Either PDC or interrupt transmission */ 1228 atmel_uart_writel(port, ATMEL_US_IDR, 1229 atmel_port->tx_done_mask); 1230 tasklet_schedule(&atmel_port->tasklet); 1231 } 1232 } 1233 1234 /* 1235 * status flags interrupt handler. 1236 */ 1237 static void 1238 atmel_handle_status(struct uart_port *port, unsigned int pending, 1239 unsigned int status) 1240 { 1241 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1242 1243 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC 1244 | ATMEL_US_CTSIC)) { 1245 atmel_port->irq_status = status; 1246 atmel_port->status_change = atmel_port->irq_status ^ 1247 atmel_port->irq_status_prev; 1248 atmel_port->irq_status_prev = status; 1249 tasklet_schedule(&atmel_port->tasklet); 1250 } 1251 } 1252 1253 /* 1254 * Interrupt handler 1255 */ 1256 static irqreturn_t atmel_interrupt(int irq, void *dev_id) 1257 { 1258 struct uart_port *port = dev_id; 1259 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1260 unsigned int status, pending, mask, pass_counter = 0; 1261 bool gpio_handled = false; 1262 1263 spin_lock(&atmel_port->lock_suspended); 1264 1265 do { 1266 status = atmel_get_lines_status(port); 1267 mask = atmel_uart_readl(port, ATMEL_US_IMR); 1268 pending = status & mask; 1269 if (!gpio_handled) { 1270 /* 1271 * Dealing with GPIO interrupt 1272 */ 1273 if (irq == atmel_port->gpio_irq[UART_GPIO_CTS]) 1274 pending |= ATMEL_US_CTSIC; 1275 1276 if (irq == atmel_port->gpio_irq[UART_GPIO_DSR]) 1277 pending |= ATMEL_US_DSRIC; 1278 1279 if (irq == atmel_port->gpio_irq[UART_GPIO_RI]) 1280 pending |= ATMEL_US_RIIC; 1281 1282 if (irq == atmel_port->gpio_irq[UART_GPIO_DCD]) 1283 pending |= ATMEL_US_DCDIC; 1284 1285 gpio_handled = true; 1286 } 1287 if (!pending) 1288 break; 1289 1290 if (atmel_port->suspended) { 1291 atmel_port->pending |= pending; 1292 atmel_port->pending_status = status; 1293 atmel_uart_writel(port, ATMEL_US_IDR, mask); 1294 pm_system_wakeup(); 1295 break; 1296 } 1297 1298 atmel_handle_receive(port, pending); 1299 atmel_handle_status(port, pending, status); 1300 atmel_handle_transmit(port, pending); 1301 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 1302 1303 spin_unlock(&atmel_port->lock_suspended); 1304 1305 return pass_counter ? IRQ_HANDLED : IRQ_NONE; 1306 } 1307 1308 static void atmel_release_tx_pdc(struct uart_port *port) 1309 { 1310 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1311 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1312 1313 dma_unmap_single(port->dev, 1314 pdc->dma_addr, 1315 pdc->dma_size, 1316 DMA_TO_DEVICE); 1317 } 1318 1319 /* 1320 * Called from tasklet with ENDTX and TXBUFE interrupts disabled. 1321 */ 1322 static void atmel_tx_pdc(struct uart_port *port) 1323 { 1324 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1325 struct circ_buf *xmit = &port->state->xmit; 1326 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1327 int count; 1328 1329 /* nothing left to transmit? 
*/ 1330 if (atmel_uart_readl(port, ATMEL_PDC_TCR)) 1331 return; 1332 1333 xmit->tail += pdc->ofs; 1334 xmit->tail &= UART_XMIT_SIZE - 1; 1335 1336 port->icount.tx += pdc->ofs; 1337 pdc->ofs = 0; 1338 1339 /* more to transmit - setup next transfer */ 1340 1341 /* disable PDC transmit */ 1342 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 1343 1344 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 1345 dma_sync_single_for_device(port->dev, 1346 pdc->dma_addr, 1347 pdc->dma_size, 1348 DMA_TO_DEVICE); 1349 1350 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 1351 pdc->ofs = count; 1352 1353 atmel_uart_writel(port, ATMEL_PDC_TPR, 1354 pdc->dma_addr + xmit->tail); 1355 atmel_uart_writel(port, ATMEL_PDC_TCR, count); 1356 /* re-enable PDC transmit */ 1357 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 1358 /* Enable interrupts */ 1359 atmel_uart_writel(port, ATMEL_US_IER, 1360 atmel_port->tx_done_mask); 1361 } else { 1362 if ((port->rs485.flags & SER_RS485_ENABLED) && 1363 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) { 1364 /* DMA done, stop TX, start RX for RS485 */ 1365 atmel_start_rx(port); 1366 } 1367 } 1368 1369 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1370 uart_write_wakeup(port); 1371 } 1372 1373 static int atmel_prepare_tx_pdc(struct uart_port *port) 1374 { 1375 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1376 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1377 struct circ_buf *xmit = &port->state->xmit; 1378 1379 pdc->buf = xmit->buf; 1380 pdc->dma_addr = dma_map_single(port->dev, 1381 pdc->buf, 1382 UART_XMIT_SIZE, 1383 DMA_TO_DEVICE); 1384 pdc->dma_size = UART_XMIT_SIZE; 1385 pdc->ofs = 0; 1386 1387 return 0; 1388 } 1389 1390 static void atmel_rx_from_ring(struct uart_port *port) 1391 { 1392 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1393 struct circ_buf *ring = &atmel_port->rx_ring; 1394 unsigned int flg; 1395 unsigned int status; 1396 1397 while (ring->head != ring->tail) { 1398 struct atmel_uart_char c; 1399 1400 /* Make sure c is loaded after head. */ 1401 smp_rmb(); 1402 1403 c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; 1404 1405 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 1406 1407 port->icount.rx++; 1408 status = c.status; 1409 flg = TTY_NORMAL; 1410 1411 /* 1412 * note that the error handling code is 1413 * out of the main execution path 1414 */ 1415 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 1416 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { 1417 if (status & ATMEL_US_RXBRK) { 1418 /* ignore side-effect */ 1419 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 1420 1421 port->icount.brk++; 1422 if (uart_handle_break(port)) 1423 continue; 1424 } 1425 if (status & ATMEL_US_PARE) 1426 port->icount.parity++; 1427 if (status & ATMEL_US_FRAME) 1428 port->icount.frame++; 1429 if (status & ATMEL_US_OVRE) 1430 port->icount.overrun++; 1431 1432 status &= port->read_status_mask; 1433 1434 if (status & ATMEL_US_RXBRK) 1435 flg = TTY_BREAK; 1436 else if (status & ATMEL_US_PARE) 1437 flg = TTY_PARITY; 1438 else if (status & ATMEL_US_FRAME) 1439 flg = TTY_FRAME; 1440 } 1441 1442 1443 if (uart_handle_sysrq_char(port, c.ch)) 1444 continue; 1445 1446 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); 1447 } 1448 1449 /* 1450 * Drop the lock here since it might end up calling 1451 * uart_start(), which takes the lock. 
1452 */ 1453 spin_unlock(&port->lock); 1454 tty_flip_buffer_push(&port->state->port); 1455 spin_lock(&port->lock); 1456 } 1457 1458 static void atmel_release_rx_pdc(struct uart_port *port) 1459 { 1460 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1461 int i; 1462 1463 for (i = 0; i < 2; i++) { 1464 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1465 1466 dma_unmap_single(port->dev, 1467 pdc->dma_addr, 1468 pdc->dma_size, 1469 DMA_FROM_DEVICE); 1470 kfree(pdc->buf); 1471 } 1472 } 1473 1474 static void atmel_rx_from_pdc(struct uart_port *port) 1475 { 1476 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1477 struct tty_port *tport = &port->state->port; 1478 struct atmel_dma_buffer *pdc; 1479 int rx_idx = atmel_port->pdc_rx_idx; 1480 unsigned int head; 1481 unsigned int tail; 1482 unsigned int count; 1483 1484 do { 1485 /* Reset the UART timeout early so that we don't miss one */ 1486 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1487 1488 pdc = &atmel_port->pdc_rx[rx_idx]; 1489 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr; 1490 tail = pdc->ofs; 1491 1492 /* If the PDC has switched buffers, RPR won't contain 1493 * any address within the current buffer. Since head 1494 * is unsigned, we just need a one-way comparison to 1495 * find out. 1496 * 1497 * In this case, we just need to consume the entire 1498 * buffer and resubmit it for DMA. This will clear the 1499 * ENDRX bit as well, so that we can safely re-enable 1500 * all interrupts below. 1501 */ 1502 head = min(head, pdc->dma_size); 1503 1504 if (likely(head != tail)) { 1505 dma_sync_single_for_cpu(port->dev, pdc->dma_addr, 1506 pdc->dma_size, DMA_FROM_DEVICE); 1507 1508 /* 1509 * head will only wrap around when we recycle 1510 * the DMA buffer, and when that happens, we 1511 * explicitly set tail to 0. So head will 1512 * always be greater than tail. 1513 */ 1514 count = head - tail; 1515 1516 tty_insert_flip_string(tport, pdc->buf + pdc->ofs, 1517 count); 1518 1519 dma_sync_single_for_device(port->dev, pdc->dma_addr, 1520 pdc->dma_size, DMA_FROM_DEVICE); 1521 1522 port->icount.rx += count; 1523 pdc->ofs = head; 1524 } 1525 1526 /* 1527 * If the current buffer is full, we need to check if 1528 * the next one contains any additional data. 1529 */ 1530 if (head >= pdc->dma_size) { 1531 pdc->ofs = 0; 1532 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr); 1533 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size); 1534 1535 rx_idx = !rx_idx; 1536 atmel_port->pdc_rx_idx = rx_idx; 1537 } 1538 } while (head >= pdc->dma_size); 1539 1540 /* 1541 * Drop the lock here since it might end up calling 1542 * uart_start(), which takes the lock. 
1543 */ 1544 spin_unlock(&port->lock); 1545 tty_flip_buffer_push(tport); 1546 spin_lock(&port->lock); 1547 1548 atmel_uart_writel(port, ATMEL_US_IER, 1549 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1550 } 1551 1552 static int atmel_prepare_rx_pdc(struct uart_port *port) 1553 { 1554 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1555 int i; 1556 1557 for (i = 0; i < 2; i++) { 1558 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1559 1560 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); 1561 if (pdc->buf == NULL) { 1562 if (i != 0) { 1563 dma_unmap_single(port->dev, 1564 atmel_port->pdc_rx[0].dma_addr, 1565 PDC_BUFFER_SIZE, 1566 DMA_FROM_DEVICE); 1567 kfree(atmel_port->pdc_rx[0].buf); 1568 } 1569 atmel_port->use_pdc_rx = 0; 1570 return -ENOMEM; 1571 } 1572 pdc->dma_addr = dma_map_single(port->dev, 1573 pdc->buf, 1574 PDC_BUFFER_SIZE, 1575 DMA_FROM_DEVICE); 1576 pdc->dma_size = PDC_BUFFER_SIZE; 1577 pdc->ofs = 0; 1578 } 1579 1580 atmel_port->pdc_rx_idx = 0; 1581 1582 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr); 1583 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE); 1584 1585 atmel_uart_writel(port, ATMEL_PDC_RNPR, 1586 atmel_port->pdc_rx[1].dma_addr); 1587 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE); 1588 1589 return 0; 1590 } 1591 1592 /* 1593 * tasklet handling tty stuff outside the interrupt handler. 1594 */ 1595 static void atmel_tasklet_func(unsigned long data) 1596 { 1597 struct uart_port *port = (struct uart_port *)data; 1598 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1599 unsigned int status = atmel_port->irq_status; 1600 unsigned int status_change = atmel_port->status_change; 1601 1602 /* The interrupt handler does not take the lock */ 1603 spin_lock(&port->lock); 1604 1605 atmel_port->schedule_tx(port); 1606 1607 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR 1608 | ATMEL_US_DCD | ATMEL_US_CTS)) { 1609 /* TODO: All reads to CSR will clear these interrupts! 
*/ 1610 if (status_change & ATMEL_US_RI) 1611 port->icount.rng++; 1612 if (status_change & ATMEL_US_DSR) 1613 port->icount.dsr++; 1614 if (status_change & ATMEL_US_DCD) 1615 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); 1616 if (status_change & ATMEL_US_CTS) 1617 uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); 1618 1619 wake_up_interruptible(&port->state->port.delta_msr_wait); 1620 1621 atmel_port->status_change = 0; 1622 } 1623 1624 atmel_port->schedule_rx(port); 1625 1626 spin_unlock(&port->lock); 1627 } 1628 1629 static void atmel_init_property(struct atmel_uart_port *atmel_port, 1630 struct platform_device *pdev) 1631 { 1632 struct device_node *np = pdev->dev.of_node; 1633 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev); 1634 1635 if (np) { 1636 /* DMA/PDC usage specification */ 1637 if (of_get_property(np, "atmel,use-dma-rx", NULL)) { 1638 if (of_get_property(np, "dmas", NULL)) { 1639 atmel_port->use_dma_rx = true; 1640 atmel_port->use_pdc_rx = false; 1641 } else { 1642 atmel_port->use_dma_rx = false; 1643 atmel_port->use_pdc_rx = true; 1644 } 1645 } else { 1646 atmel_port->use_dma_rx = false; 1647 atmel_port->use_pdc_rx = false; 1648 } 1649 1650 if (of_get_property(np, "atmel,use-dma-tx", NULL)) { 1651 if (of_get_property(np, "dmas", NULL)) { 1652 atmel_port->use_dma_tx = true; 1653 atmel_port->use_pdc_tx = false; 1654 } else { 1655 atmel_port->use_dma_tx = false; 1656 atmel_port->use_pdc_tx = true; 1657 } 1658 } else { 1659 atmel_port->use_dma_tx = false; 1660 atmel_port->use_pdc_tx = false; 1661 } 1662 1663 } else { 1664 atmel_port->use_pdc_rx = pdata->use_dma_rx; 1665 atmel_port->use_pdc_tx = pdata->use_dma_tx; 1666 atmel_port->use_dma_rx = false; 1667 atmel_port->use_dma_tx = false; 1668 } 1669 1670 } 1671 1672 static void atmel_init_rs485(struct uart_port *port, 1673 struct platform_device *pdev) 1674 { 1675 struct device_node *np = pdev->dev.of_node; 1676 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev); 1677 1678 if (np) { 1679 u32 rs485_delay[2]; 1680 /* rs485 properties */ 1681 if (of_property_read_u32_array(np, "rs485-rts-delay", 1682 rs485_delay, 2) == 0) { 1683 struct serial_rs485 *rs485conf = &port->rs485; 1684 1685 rs485conf->delay_rts_before_send = rs485_delay[0]; 1686 rs485conf->delay_rts_after_send = rs485_delay[1]; 1687 rs485conf->flags = 0; 1688 1689 if (of_get_property(np, "rs485-rx-during-tx", NULL)) 1690 rs485conf->flags |= SER_RS485_RX_DURING_TX; 1691 1692 if (of_get_property(np, "linux,rs485-enabled-at-boot-time", 1693 NULL)) 1694 rs485conf->flags |= SER_RS485_ENABLED; 1695 } 1696 } else { 1697 port->rs485 = pdata->rs485; 1698 } 1699 1700 } 1701 1702 static void atmel_set_ops(struct uart_port *port) 1703 { 1704 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1705 1706 if (atmel_use_dma_rx(port)) { 1707 atmel_port->prepare_rx = &atmel_prepare_rx_dma; 1708 atmel_port->schedule_rx = &atmel_rx_from_dma; 1709 atmel_port->release_rx = &atmel_release_rx_dma; 1710 } else if (atmel_use_pdc_rx(port)) { 1711 atmel_port->prepare_rx = &atmel_prepare_rx_pdc; 1712 atmel_port->schedule_rx = &atmel_rx_from_pdc; 1713 atmel_port->release_rx = &atmel_release_rx_pdc; 1714 } else { 1715 atmel_port->prepare_rx = NULL; 1716 atmel_port->schedule_rx = &atmel_rx_from_ring; 1717 atmel_port->release_rx = NULL; 1718 } 1719 1720 if (atmel_use_dma_tx(port)) { 1721 atmel_port->prepare_tx = &atmel_prepare_tx_dma; 1722 atmel_port->schedule_tx = &atmel_tx_dma; 1723 atmel_port->release_tx = &atmel_release_tx_dma; 1724 } else if 
(atmel_use_pdc_tx(port)) { 1725 atmel_port->prepare_tx = &atmel_prepare_tx_pdc; 1726 atmel_port->schedule_tx = &atmel_tx_pdc; 1727 atmel_port->release_tx = &atmel_release_tx_pdc; 1728 } else { 1729 atmel_port->prepare_tx = NULL; 1730 atmel_port->schedule_tx = &atmel_tx_chars; 1731 atmel_port->release_tx = NULL; 1732 } 1733 } 1734 1735 /* 1736 * Get ip name usart or uart 1737 */ 1738 static void atmel_get_ip_name(struct uart_port *port) 1739 { 1740 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1741 int name = atmel_uart_readl(port, ATMEL_US_NAME); 1742 u32 version; 1743 int usart, uart; 1744 /* usart and uart ascii */ 1745 usart = 0x55534152; 1746 uart = 0x44424755; 1747 1748 atmel_port->is_usart = false; 1749 1750 if (name == usart) { 1751 dev_dbg(port->dev, "This is usart\n"); 1752 atmel_port->is_usart = true; 1753 } else if (name == uart) { 1754 dev_dbg(port->dev, "This is uart\n"); 1755 atmel_port->is_usart = false; 1756 } else { 1757 /* fallback for older SoCs: use version field */ 1758 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1759 switch (version) { 1760 case 0x302: 1761 case 0x10213: 1762 dev_dbg(port->dev, "This version is usart\n"); 1763 atmel_port->is_usart = true; 1764 break; 1765 case 0x203: 1766 case 0x10202: 1767 dev_dbg(port->dev, "This version is uart\n"); 1768 atmel_port->is_usart = false; 1769 break; 1770 default: 1771 dev_err(port->dev, "Not supported ip name nor version, set to uart\n"); 1772 } 1773 } 1774 } 1775 1776 static void atmel_free_gpio_irq(struct uart_port *port) 1777 { 1778 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1779 enum mctrl_gpio_idx i; 1780 1781 for (i = 0; i < UART_GPIO_MAX; i++) 1782 if (atmel_port->gpio_irq[i] >= 0) 1783 free_irq(atmel_port->gpio_irq[i], port); 1784 } 1785 1786 static int atmel_request_gpio_irq(struct uart_port *port) 1787 { 1788 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1789 int *irq = atmel_port->gpio_irq; 1790 enum mctrl_gpio_idx i; 1791 int err = 0; 1792 1793 for (i = 0; (i < UART_GPIO_MAX) && !err; i++) { 1794 if (irq[i] < 0) 1795 continue; 1796 1797 irq_set_status_flags(irq[i], IRQ_NOAUTOEN); 1798 err = request_irq(irq[i], atmel_interrupt, IRQ_TYPE_EDGE_BOTH, 1799 "atmel_serial", port); 1800 if (err) 1801 dev_err(port->dev, "atmel_startup - Can't get %d irq\n", 1802 irq[i]); 1803 } 1804 1805 /* 1806 * If something went wrong, rollback. 1807 */ 1808 while (err && (--i >= 0)) 1809 if (irq[i] >= 0) 1810 free_irq(irq[i], port); 1811 1812 return err; 1813 } 1814 1815 /* 1816 * Perform initialization and enable port for reception 1817 */ 1818 static int atmel_startup(struct uart_port *port) 1819 { 1820 struct platform_device *pdev = to_platform_device(port->dev); 1821 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1822 struct tty_struct *tty = port->state->port.tty; 1823 int retval; 1824 1825 /* 1826 * Ensure that no interrupts are enabled otherwise when 1827 * request_irq() is called we could get stuck trying to 1828 * handle an unexpected interrupt 1829 */ 1830 atmel_uart_writel(port, ATMEL_US_IDR, -1); 1831 atmel_port->ms_irq_enabled = false; 1832 1833 /* 1834 * Allocate the IRQ 1835 */ 1836 retval = request_irq(port->irq, atmel_interrupt, 1837 IRQF_SHARED | IRQF_COND_SUSPEND, 1838 tty ? 
tty->name : "atmel_serial", port); 1839 if (retval) { 1840 dev_err(port->dev, "atmel_startup - Can't get irq\n"); 1841 return retval; 1842 } 1843 1844 /* 1845 * Get the GPIO lines IRQ 1846 */ 1847 retval = atmel_request_gpio_irq(port); 1848 if (retval) 1849 goto free_irq; 1850 1851 tasklet_enable(&atmel_port->tasklet); 1852 1853 /* 1854 * Initialize DMA (if necessary) 1855 */ 1856 atmel_init_property(atmel_port, pdev); 1857 atmel_set_ops(port); 1858 1859 if (atmel_port->prepare_rx) { 1860 retval = atmel_port->prepare_rx(port); 1861 if (retval < 0) 1862 atmel_set_ops(port); 1863 } 1864 1865 if (atmel_port->prepare_tx) { 1866 retval = atmel_port->prepare_tx(port); 1867 if (retval < 0) 1868 atmel_set_ops(port); 1869 } 1870 1871 /* 1872 * Enable FIFO when available 1873 */ 1874 if (atmel_port->fifo_size) { 1875 unsigned int txrdym = ATMEL_US_ONE_DATA; 1876 unsigned int rxrdym = ATMEL_US_ONE_DATA; 1877 unsigned int fmr; 1878 1879 atmel_uart_writel(port, ATMEL_US_CR, 1880 ATMEL_US_FIFOEN | 1881 ATMEL_US_RXFCLR | 1882 ATMEL_US_TXFLCLR); 1883 1884 if (atmel_use_dma_tx(port)) 1885 txrdym = ATMEL_US_FOUR_DATA; 1886 1887 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym); 1888 if (atmel_port->rts_high && 1889 atmel_port->rts_low) 1890 fmr |= ATMEL_US_FRTSC | 1891 ATMEL_US_RXFTHRES(atmel_port->rts_high) | 1892 ATMEL_US_RXFTHRES2(atmel_port->rts_low); 1893 1894 atmel_uart_writel(port, ATMEL_US_FMR, fmr); 1895 } 1896 1897 /* Save current CSR for comparison in atmel_tasklet_func() */ 1898 atmel_port->irq_status_prev = atmel_get_lines_status(port); 1899 atmel_port->irq_status = atmel_port->irq_status_prev; 1900 1901 /* 1902 * Finally, enable the serial port 1903 */ 1904 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 1905 /* enable xmit & rcvr */ 1906 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 1907 1908 setup_timer(&atmel_port->uart_timer, 1909 atmel_uart_timer_callback, 1910 (unsigned long)port); 1911 1912 if (atmel_use_pdc_rx(port)) { 1913 /* set UART timeout */ 1914 if (!atmel_port->is_usart) { 1915 mod_timer(&atmel_port->uart_timer, 1916 jiffies + uart_poll_timeout(port)); 1917 /* set USART timeout */ 1918 } else { 1919 atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT); 1920 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1921 1922 atmel_uart_writel(port, ATMEL_US_IER, 1923 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1924 } 1925 /* enable PDC controller */ 1926 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 1927 } else if (atmel_use_dma_rx(port)) { 1928 /* set UART timeout */ 1929 if (!atmel_port->is_usart) { 1930 mod_timer(&atmel_port->uart_timer, 1931 jiffies + uart_poll_timeout(port)); 1932 /* set USART timeout */ 1933 } else { 1934 atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT); 1935 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1936 1937 atmel_uart_writel(port, ATMEL_US_IER, 1938 ATMEL_US_TIMEOUT); 1939 } 1940 } else { 1941 /* enable receive only */ 1942 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 1943 } 1944 1945 return 0; 1946 1947 free_irq: 1948 free_irq(port->irq, port); 1949 1950 return retval; 1951 } 1952 1953 /* 1954 * Flush any TX data submitted for DMA. Called when the TX circular 1955 * buffer is reset. 
1956 */ 1957 static void atmel_flush_buffer(struct uart_port *port) 1958 { 1959 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1960 1961 if (atmel_use_pdc_tx(port)) { 1962 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 1963 atmel_port->pdc_tx.ofs = 0; 1964 } 1965 } 1966 1967 /* 1968 * Disable the port 1969 */ 1970 static void atmel_shutdown(struct uart_port *port) 1971 { 1972 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1973 1974 /* 1975 * Prevent any tasklets being scheduled during 1976 * cleanup 1977 */ 1978 del_timer_sync(&atmel_port->uart_timer); 1979 1980 /* 1981 * Clear out any scheduled tasklets before 1982 * we destroy the buffers 1983 */ 1984 tasklet_disable(&atmel_port->tasklet); 1985 tasklet_kill(&atmel_port->tasklet); 1986 1987 /* 1988 * Ensure everything is stopped and 1989 * disable all interrupts, port and break condition. 1990 */ 1991 atmel_stop_rx(port); 1992 atmel_stop_tx(port); 1993 1994 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 1995 atmel_uart_writel(port, ATMEL_US_IDR, -1); 1996 1997 1998 /* 1999 * Shut-down the DMA. 2000 */ 2001 if (atmel_port->release_rx) 2002 atmel_port->release_rx(port); 2003 if (atmel_port->release_tx) 2004 atmel_port->release_tx(port); 2005 2006 /* 2007 * Reset ring buffer pointers 2008 */ 2009 atmel_port->rx_ring.head = 0; 2010 atmel_port->rx_ring.tail = 0; 2011 2012 /* 2013 * Free the interrupts 2014 */ 2015 free_irq(port->irq, port); 2016 atmel_free_gpio_irq(port); 2017 2018 atmel_port->ms_irq_enabled = false; 2019 2020 atmel_flush_buffer(port); 2021 } 2022 2023 /* 2024 * Power / Clock management. 2025 */ 2026 static void atmel_serial_pm(struct uart_port *port, unsigned int state, 2027 unsigned int oldstate) 2028 { 2029 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2030 2031 switch (state) { 2032 case 0: 2033 /* 2034 * Enable the peripheral clock for this serial port. 2035 * This is called on uart_open() or a resume event. 2036 */ 2037 clk_prepare_enable(atmel_port->clk); 2038 2039 /* re-enable interrupts if we disabled some on suspend */ 2040 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); 2041 break; 2042 case 3: 2043 /* Back up the interrupt mask and disable all interrupts */ 2044 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); 2045 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2046 2047 /* 2048 * Disable the peripheral clock for this serial port. 2049 * This is called on uart_close() or a suspend event. 
2050 */ 2051 clk_disable_unprepare(atmel_port->clk); 2052 break; 2053 default: 2054 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); 2055 } 2056 } 2057 2058 /* 2059 * Change the port parameters 2060 */ 2061 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, 2062 struct ktermios *old) 2063 { 2064 unsigned long flags; 2065 unsigned int old_mode, mode, imr, quot, baud; 2066 2067 /* save the current mode register */ 2068 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR); 2069 2070 /* reset the mode, clock divisor, parity, stop bits and data size */ 2071 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | 2072 ATMEL_US_PAR | ATMEL_US_USMODE); 2073 2074 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); 2075 quot = uart_get_divisor(port, baud); 2076 2077 if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */ 2078 quot /= 8; 2079 mode |= ATMEL_US_USCLKS_MCK_DIV8; 2080 } 2081 2082 /* byte size */ 2083 switch (termios->c_cflag & CSIZE) { 2084 case CS5: 2085 mode |= ATMEL_US_CHRL_5; 2086 break; 2087 case CS6: 2088 mode |= ATMEL_US_CHRL_6; 2089 break; 2090 case CS7: 2091 mode |= ATMEL_US_CHRL_7; 2092 break; 2093 default: 2094 mode |= ATMEL_US_CHRL_8; 2095 break; 2096 } 2097 2098 /* stop bits */ 2099 if (termios->c_cflag & CSTOPB) 2100 mode |= ATMEL_US_NBSTOP_2; 2101 2102 /* parity */ 2103 if (termios->c_cflag & PARENB) { 2104 /* Mark or Space parity */ 2105 if (termios->c_cflag & CMSPAR) { 2106 if (termios->c_cflag & PARODD) 2107 mode |= ATMEL_US_PAR_MARK; 2108 else 2109 mode |= ATMEL_US_PAR_SPACE; 2110 } else if (termios->c_cflag & PARODD) 2111 mode |= ATMEL_US_PAR_ODD; 2112 else 2113 mode |= ATMEL_US_PAR_EVEN; 2114 } else 2115 mode |= ATMEL_US_PAR_NONE; 2116 2117 spin_lock_irqsave(&port->lock, flags); 2118 2119 port->read_status_mask = ATMEL_US_OVRE; 2120 if (termios->c_iflag & INPCK) 2121 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2122 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 2123 port->read_status_mask |= ATMEL_US_RXBRK; 2124 2125 if (atmel_use_pdc_rx(port)) 2126 /* need to enable error interrupts */ 2127 atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask); 2128 2129 /* 2130 * Characters to ignore 2131 */ 2132 port->ignore_status_mask = 0; 2133 if (termios->c_iflag & IGNPAR) 2134 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2135 if (termios->c_iflag & IGNBRK) { 2136 port->ignore_status_mask |= ATMEL_US_RXBRK; 2137 /* 2138 * If we're ignoring parity and break indicators, 2139 * ignore overruns too (for real raw support). 2140 */ 2141 if (termios->c_iflag & IGNPAR) 2142 port->ignore_status_mask |= ATMEL_US_OVRE; 2143 } 2144 /* TODO: Ignore all characters if CREAD is set.*/ 2145 2146 /* update the per-port timeout */ 2147 uart_update_timeout(port, termios->c_cflag, baud); 2148 2149 /* 2150 * save/disable interrupts. The tty layer will ensure that the 2151 * transmitter is empty if requested by the caller, so there's 2152 * no need to wait for it here. 
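 * The mask saved in 'imr' below is restored once the new mode, baud rate
 * divisor and RTS state have been programmed.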
2153 */
2154 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2155 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2156
2157 /* disable receiver and transmitter */
2158 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2159
2160 /* mode */
2161 if (port->rs485.flags & SER_RS485_ENABLED) {
2162 atmel_uart_writel(port, ATMEL_US_TTGR,
2163 port->rs485.delay_rts_after_send);
2164 mode |= ATMEL_US_USMODE_RS485;
2165 } else if (termios->c_cflag & CRTSCTS) {
2166 /* RS232 with hardware handshake (RTS/CTS) */
2167 mode |= ATMEL_US_USMODE_HWHS;
2168 } else {
2169 /* RS232 without hardware handshake */
2170 mode |= ATMEL_US_USMODE_NORMAL;
2171 }
2172
2173 /* set the mode, clock divisor, parity, stop bits and data size */
2174 atmel_uart_writel(port, ATMEL_US_MR, mode);
2175
2176 /*
2177 * when switching the mode, set the RTS line state according to the
2178 * new mode, otherwise keep the former state
2179 */
2180 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2181 unsigned int rts_state;
2182
2183 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2184 /* let the hardware control the RTS line */
2185 rts_state = ATMEL_US_RTSDIS;
2186 } else {
2187 /* force RTS line to low level */
2188 rts_state = ATMEL_US_RTSEN;
2189 }
2190
2191 atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2192 }
2193
2194 /* set the baud rate */
2195 atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2196 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2197 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2198
2199 /* restore interrupts */
2200 atmel_uart_writel(port, ATMEL_US_IER, imr);
2201
2202 /* CTS flow-control and modem-status interrupts */
2203 if (UART_ENABLE_MS(port, termios->c_cflag))
2204 atmel_enable_ms(port);
2205 else
2206 atmel_disable_ms(port);
2207
2208 spin_unlock_irqrestore(&port->lock, flags);
2209 }
2210
2211 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2212 {
2213 if (termios->c_line == N_PPS) {
2214 port->flags |= UPF_HARDPPS_CD;
2215 spin_lock_irq(&port->lock);
2216 atmel_enable_ms(port);
2217 spin_unlock_irq(&port->lock);
2218 } else {
2219 port->flags &= ~UPF_HARDPPS_CD;
2220 if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2221 spin_lock_irq(&port->lock);
2222 atmel_disable_ms(port);
2223 spin_unlock_irq(&port->lock);
2224 }
2225 }
2226 }
2227
2228 /*
2229 * Return string describing the specified port
2230 */
2231 static const char *atmel_type(struct uart_port *port)
2232 {
2233 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2234 }
2235
2236 /*
2237 * Release the memory region(s) being used by 'port'.
2238 */
2239 static void atmel_release_port(struct uart_port *port)
2240 {
2241 struct platform_device *pdev = to_platform_device(port->dev);
2242 int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2243
2244 release_mem_region(port->mapbase, size);
2245
2246 if (port->flags & UPF_IOREMAP) {
2247 iounmap(port->membase);
2248 port->membase = NULL;
2249 }
2250 }
2251
2252 /*
2253 * Request the memory region(s) being used by 'port'.
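 * The region size is derived from the first platform resource, matching
 * the computation in atmel_release_port() above.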
2254 */ 2255 static int atmel_request_port(struct uart_port *port) 2256 { 2257 struct platform_device *pdev = to_platform_device(port->dev); 2258 int size = pdev->resource[0].end - pdev->resource[0].start + 1; 2259 2260 if (!request_mem_region(port->mapbase, size, "atmel_serial")) 2261 return -EBUSY; 2262 2263 if (port->flags & UPF_IOREMAP) { 2264 port->membase = ioremap(port->mapbase, size); 2265 if (port->membase == NULL) { 2266 release_mem_region(port->mapbase, size); 2267 return -ENOMEM; 2268 } 2269 } 2270 2271 return 0; 2272 } 2273 2274 /* 2275 * Configure/autoconfigure the port. 2276 */ 2277 static void atmel_config_port(struct uart_port *port, int flags) 2278 { 2279 if (flags & UART_CONFIG_TYPE) { 2280 port->type = PORT_ATMEL; 2281 atmel_request_port(port); 2282 } 2283 } 2284 2285 /* 2286 * Verify the new serial_struct (for TIOCSSERIAL). 2287 */ 2288 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) 2289 { 2290 int ret = 0; 2291 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) 2292 ret = -EINVAL; 2293 if (port->irq != ser->irq) 2294 ret = -EINVAL; 2295 if (ser->io_type != SERIAL_IO_MEM) 2296 ret = -EINVAL; 2297 if (port->uartclk / 16 != ser->baud_base) 2298 ret = -EINVAL; 2299 if ((void *)port->mapbase != ser->iomem_base) 2300 ret = -EINVAL; 2301 if (port->iobase != ser->port) 2302 ret = -EINVAL; 2303 if (ser->hub6 != 0) 2304 ret = -EINVAL; 2305 return ret; 2306 } 2307 2308 #ifdef CONFIG_CONSOLE_POLL 2309 static int atmel_poll_get_char(struct uart_port *port) 2310 { 2311 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY)) 2312 cpu_relax(); 2313 2314 return atmel_uart_read_char(port); 2315 } 2316 2317 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) 2318 { 2319 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2320 cpu_relax(); 2321 2322 atmel_uart_write_char(port, ch); 2323 } 2324 #endif 2325 2326 static struct uart_ops atmel_pops = { 2327 .tx_empty = atmel_tx_empty, 2328 .set_mctrl = atmel_set_mctrl, 2329 .get_mctrl = atmel_get_mctrl, 2330 .stop_tx = atmel_stop_tx, 2331 .start_tx = atmel_start_tx, 2332 .stop_rx = atmel_stop_rx, 2333 .enable_ms = atmel_enable_ms, 2334 .break_ctl = atmel_break_ctl, 2335 .startup = atmel_startup, 2336 .shutdown = atmel_shutdown, 2337 .flush_buffer = atmel_flush_buffer, 2338 .set_termios = atmel_set_termios, 2339 .set_ldisc = atmel_set_ldisc, 2340 .type = atmel_type, 2341 .release_port = atmel_release_port, 2342 .request_port = atmel_request_port, 2343 .config_port = atmel_config_port, 2344 .verify_port = atmel_verify_port, 2345 .pm = atmel_serial_pm, 2346 #ifdef CONFIG_CONSOLE_POLL 2347 .poll_get_char = atmel_poll_get_char, 2348 .poll_put_char = atmel_poll_put_char, 2349 #endif 2350 }; 2351 2352 /* 2353 * Configure the port from the platform device resource info. 
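 * Besides filling in the uart_port fields, this enables the peripheral
 * clock just long enough to read its rate, then disables it again until
 * the port is actually opened.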
2354 */ 2355 static int atmel_init_port(struct atmel_uart_port *atmel_port, 2356 struct platform_device *pdev) 2357 { 2358 int ret; 2359 struct uart_port *port = &atmel_port->uart; 2360 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev); 2361 2362 atmel_init_property(atmel_port, pdev); 2363 atmel_set_ops(port); 2364 2365 atmel_init_rs485(port, pdev); 2366 2367 port->iotype = UPIO_MEM; 2368 port->flags = UPF_BOOT_AUTOCONF; 2369 port->ops = &atmel_pops; 2370 port->fifosize = 1; 2371 port->dev = &pdev->dev; 2372 port->mapbase = pdev->resource[0].start; 2373 port->irq = pdev->resource[1].start; 2374 port->rs485_config = atmel_config_rs485; 2375 2376 tasklet_init(&atmel_port->tasklet, atmel_tasklet_func, 2377 (unsigned long)port); 2378 tasklet_disable(&atmel_port->tasklet); 2379 2380 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); 2381 2382 if (pdata && pdata->regs) { 2383 /* Already mapped by setup code */ 2384 port->membase = pdata->regs; 2385 } else { 2386 port->flags |= UPF_IOREMAP; 2387 port->membase = NULL; 2388 } 2389 2390 /* for console, the clock could already be configured */ 2391 if (!atmel_port->clk) { 2392 atmel_port->clk = clk_get(&pdev->dev, "usart"); 2393 if (IS_ERR(atmel_port->clk)) { 2394 ret = PTR_ERR(atmel_port->clk); 2395 atmel_port->clk = NULL; 2396 return ret; 2397 } 2398 ret = clk_prepare_enable(atmel_port->clk); 2399 if (ret) { 2400 clk_put(atmel_port->clk); 2401 atmel_port->clk = NULL; 2402 return ret; 2403 } 2404 port->uartclk = clk_get_rate(atmel_port->clk); 2405 clk_disable_unprepare(atmel_port->clk); 2406 /* only enable clock when USART is in use */ 2407 } 2408 2409 /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */ 2410 if (port->rs485.flags & SER_RS485_ENABLED) 2411 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 2412 else if (atmel_use_pdc_tx(port)) { 2413 port->fifosize = PDC_BUFFER_SIZE; 2414 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; 2415 } else { 2416 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 2417 } 2418 2419 return 0; 2420 } 2421 2422 struct platform_device *atmel_default_console_device; /* the serial console device */ 2423 2424 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2425 static void atmel_console_putchar(struct uart_port *port, int ch) 2426 { 2427 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2428 cpu_relax(); 2429 atmel_uart_write_char(port, ch); 2430 } 2431 2432 /* 2433 * Interrupts are disabled on entering 2434 */ 2435 static void atmel_console_write(struct console *co, const char *s, u_int count) 2436 { 2437 struct uart_port *port = &atmel_ports[co->index].uart; 2438 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2439 unsigned int status, imr; 2440 unsigned int pdc_tx; 2441 2442 /* 2443 * First, save IMR and then disable interrupts 2444 */ 2445 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2446 atmel_uart_writel(port, ATMEL_US_IDR, 2447 ATMEL_US_RXRDY | atmel_port->tx_done_mask); 2448 2449 /* Store PDC transmit status and disable it */ 2450 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2451 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2452 2453 uart_console_write(port, s, count, atmel_console_putchar); 2454 2455 /* 2456 * Finally, wait for transmitter to become empty 2457 * and restore IMR 2458 */ 2459 do { 2460 status = atmel_uart_readl(port, ATMEL_US_CSR); 2461 } while (!(status & ATMEL_US_TXRDY)); 2462 2463 /* Restore PDC transmit status */ 2464 if (pdc_tx) 2465 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 2466 2467 
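/*
 * Note: the loop above polls TXRDY (holding register free), not TXEMPTY,
 * so the last character may still be sitting in the shift register when
 * PDC transmission is re-enabled.
 */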
/* set interrupts back the way they were */ 2468 atmel_uart_writel(port, ATMEL_US_IER, imr); 2469 } 2470 2471 /* 2472 * If the port was already initialised (eg, by a boot loader), 2473 * try to determine the current setup. 2474 */ 2475 static void __init atmel_console_get_options(struct uart_port *port, int *baud, 2476 int *parity, int *bits) 2477 { 2478 unsigned int mr, quot; 2479 2480 /* 2481 * If the baud rate generator isn't running, the port wasn't 2482 * initialized by the boot loader. 2483 */ 2484 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD; 2485 if (!quot) 2486 return; 2487 2488 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; 2489 if (mr == ATMEL_US_CHRL_8) 2490 *bits = 8; 2491 else 2492 *bits = 7; 2493 2494 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; 2495 if (mr == ATMEL_US_PAR_EVEN) 2496 *parity = 'e'; 2497 else if (mr == ATMEL_US_PAR_ODD) 2498 *parity = 'o'; 2499 2500 /* 2501 * The serial core only rounds down when matching this to a 2502 * supported baud rate. Make sure we don't end up slightly 2503 * lower than one of those, as it would make us fall through 2504 * to a much lower baud rate than we really want. 2505 */ 2506 *baud = port->uartclk / (16 * (quot - 1)); 2507 } 2508 2509 static int __init atmel_console_setup(struct console *co, char *options) 2510 { 2511 int ret; 2512 struct uart_port *port = &atmel_ports[co->index].uart; 2513 int baud = 115200; 2514 int bits = 8; 2515 int parity = 'n'; 2516 int flow = 'n'; 2517 2518 if (port->membase == NULL) { 2519 /* Port not initialized yet - delay setup */ 2520 return -ENODEV; 2521 } 2522 2523 ret = clk_prepare_enable(atmel_ports[co->index].clk); 2524 if (ret) 2525 return ret; 2526 2527 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2528 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2529 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2530 2531 if (options) 2532 uart_parse_options(options, &baud, &parity, &bits, &flow); 2533 else 2534 atmel_console_get_options(port, &baud, &parity, &bits); 2535 2536 return uart_set_options(port, co, baud, parity, bits, flow); 2537 } 2538 2539 static struct uart_driver atmel_uart; 2540 2541 static struct console atmel_console = { 2542 .name = ATMEL_DEVICENAME, 2543 .write = atmel_console_write, 2544 .device = uart_console_device, 2545 .setup = atmel_console_setup, 2546 .flags = CON_PRINTBUFFER, 2547 .index = -1, 2548 .data = &atmel_uart, 2549 }; 2550 2551 #define ATMEL_CONSOLE_DEVICE (&atmel_console) 2552 2553 /* 2554 * Early console initialization (before VM subsystem initialized). 2555 */ 2556 static int __init atmel_console_init(void) 2557 { 2558 int ret; 2559 if (atmel_default_console_device) { 2560 struct atmel_uart_data *pdata = 2561 dev_get_platdata(&atmel_default_console_device->dev); 2562 int id = pdata->num; 2563 struct atmel_uart_port *port = &atmel_ports[id]; 2564 2565 port->backup_imr = 0; 2566 port->uart.line = id; 2567 2568 add_preferred_console(ATMEL_DEVICENAME, id, NULL); 2569 ret = atmel_init_port(port, atmel_default_console_device); 2570 if (ret) 2571 return ret; 2572 register_console(&atmel_console); 2573 } 2574 2575 return 0; 2576 } 2577 2578 console_initcall(atmel_console_init); 2579 2580 /* 2581 * Late console initialization. 
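 * Registers the console from a core_initcall in case the early
 * console_initcall above was not able to enable it.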
2582 */ 2583 static int __init atmel_late_console_init(void) 2584 { 2585 if (atmel_default_console_device 2586 && !(atmel_console.flags & CON_ENABLED)) 2587 register_console(&atmel_console); 2588 2589 return 0; 2590 } 2591 2592 core_initcall(atmel_late_console_init); 2593 2594 static inline bool atmel_is_console_port(struct uart_port *port) 2595 { 2596 return port->cons && port->cons->index == port->line; 2597 } 2598 2599 #else 2600 #define ATMEL_CONSOLE_DEVICE NULL 2601 2602 static inline bool atmel_is_console_port(struct uart_port *port) 2603 { 2604 return false; 2605 } 2606 #endif 2607 2608 static struct uart_driver atmel_uart = { 2609 .owner = THIS_MODULE, 2610 .driver_name = "atmel_serial", 2611 .dev_name = ATMEL_DEVICENAME, 2612 .major = SERIAL_ATMEL_MAJOR, 2613 .minor = MINOR_START, 2614 .nr = ATMEL_MAX_UART, 2615 .cons = ATMEL_CONSOLE_DEVICE, 2616 }; 2617 2618 #ifdef CONFIG_PM 2619 static bool atmel_serial_clk_will_stop(void) 2620 { 2621 #ifdef CONFIG_ARCH_AT91 2622 return at91_suspend_entering_slow_clock(); 2623 #else 2624 return false; 2625 #endif 2626 } 2627 2628 static int atmel_serial_suspend(struct platform_device *pdev, 2629 pm_message_t state) 2630 { 2631 struct uart_port *port = platform_get_drvdata(pdev); 2632 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2633 2634 if (atmel_is_console_port(port) && console_suspend_enabled) { 2635 /* Drain the TX shifter */ 2636 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & 2637 ATMEL_US_TXEMPTY)) 2638 cpu_relax(); 2639 } 2640 2641 /* we can not wake up if we're running on slow clock */ 2642 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); 2643 if (atmel_serial_clk_will_stop()) { 2644 unsigned long flags; 2645 2646 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2647 atmel_port->suspended = true; 2648 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2649 device_set_wakeup_enable(&pdev->dev, 0); 2650 } 2651 2652 uart_suspend_port(&atmel_uart, port); 2653 2654 return 0; 2655 } 2656 2657 static int atmel_serial_resume(struct platform_device *pdev) 2658 { 2659 struct uart_port *port = platform_get_drvdata(pdev); 2660 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2661 unsigned long flags; 2662 2663 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2664 if (atmel_port->pending) { 2665 atmel_handle_receive(port, atmel_port->pending); 2666 atmel_handle_status(port, atmel_port->pending, 2667 atmel_port->pending_status); 2668 atmel_handle_transmit(port, atmel_port->pending); 2669 atmel_port->pending = 0; 2670 } 2671 atmel_port->suspended = false; 2672 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2673 2674 uart_resume_port(&atmel_uart, port); 2675 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); 2676 2677 return 0; 2678 } 2679 #else 2680 #define atmel_serial_suspend NULL 2681 #define atmel_serial_resume NULL 2682 #endif 2683 2684 static int atmel_init_gpios(struct atmel_uart_port *p, struct device *dev) 2685 { 2686 enum mctrl_gpio_idx i; 2687 struct gpio_desc *gpiod; 2688 2689 p->gpios = mctrl_gpio_init(dev, 0); 2690 if (IS_ERR(p->gpios)) 2691 return PTR_ERR(p->gpios); 2692 2693 for (i = 0; i < UART_GPIO_MAX; i++) { 2694 gpiod = mctrl_gpio_to_gpiod(p->gpios, i); 2695 if (gpiod && (gpiod_get_direction(gpiod) == GPIOF_DIR_IN)) 2696 p->gpio_irq[i] = gpiod_to_irq(gpiod); 2697 else 2698 p->gpio_irq[i] = -EINVAL; 2699 } 2700 2701 return 0; 2702 } 2703 2704 static void atmel_serial_probe_fifos(struct atmel_uart_port *port, 2705 struct platform_device *pdev) 2706 { 
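/*
 * The FIFO depth comes from the optional "atmel,fifo-size" device-tree
 * property, e.g. (node name and value purely illustrative):
 *
 *	serial@f8020000 {
 *		atmel,fifo-size = <32>;
 *	};
 */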
2707 port->fifo_size = 0;
2708 port->rts_low = 0;
2709 port->rts_high = 0;
2710
2711 if (of_property_read_u32(pdev->dev.of_node,
2712 "atmel,fifo-size",
2713 &port->fifo_size))
2714 return;
2715
2716 if (!port->fifo_size)
2717 return;
2718
2719 if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2720 port->fifo_size = 0;
2721 dev_err(&pdev->dev, "Invalid FIFO size\n");
2722 return;
2723 }
2724
2725 /*
2726 * 0 <= rts_low <= rts_high <= fifo_size
2727 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend
2728 * to flush their internal TX FIFO, commonly up to 16 data, before they
2729 * actually stop sending new data. So we try to set the RTS High
2730 * Threshold to a reasonably high value respecting this 16 data
2731 * empirical rule when possible.
2732 */
2733 port->rts_high = max_t(int, port->fifo_size >> 1,
2734 port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2735 port->rts_low = max_t(int, port->fifo_size >> 2,
2736 port->fifo_size - ATMEL_RTS_LOW_OFFSET);
2737
2738 dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2739 port->fifo_size);
2740 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2741 port->rts_high);
2742 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
2743 port->rts_low);
2744 }
2745
2746 static int atmel_serial_probe(struct platform_device *pdev)
2747 {
2748 struct atmel_uart_port *port;
2749 struct device_node *np = pdev->dev.of_node;
2750 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2751 void *data;
2752 int ret = -ENODEV;
2753 bool rs485_enabled;
2754
2755 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2756
2757 if (np)
2758 ret = of_alias_get_id(np, "serial");
2759 else
2760 if (pdata)
2761 ret = pdata->num;
2762
2763 if (ret < 0)
2764 /* port id not found in platform data or device-tree aliases:
2765 * auto-enumerate it */
2766 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2767
2768 if (ret >= ATMEL_MAX_UART) {
2769 ret = -ENODEV;
2770 goto err;
2771 }
2772
2773 if (test_and_set_bit(ret, atmel_ports_in_use)) {
2774 /* port already in use */
2775 ret = -EBUSY;
2776 goto err;
2777 }
2778
2779 port = &atmel_ports[ret];
2780 port->backup_imr = 0;
2781 port->uart.line = ret;
2782 atmel_serial_probe_fifos(port, pdev);
2783
2784 spin_lock_init(&port->lock_suspended);
2785
2786 ret = atmel_init_gpios(port, &pdev->dev);
2787 if (ret < 0) {
2788 dev_err(&pdev->dev, "Failed to initialize GPIOs.\n");
2789 goto err;
2790 }
2791
2792 ret = atmel_init_port(port, pdev);
2793 if (ret)
2794 goto err_clear_bit;
2795
2796 if (!atmel_use_pdc_rx(&port->uart)) {
2797 ret = -ENOMEM;
2798 data = kmalloc(sizeof(struct atmel_uart_char)
2799 * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
2800 if (!data)
2801 goto err_alloc_ring;
2802 port->rx_ring.buf = data;
2803 }
2804
2805 rs485_enabled = port->uart.rs485.flags & SER_RS485_ENABLED;
2806
2807 ret = uart_add_one_port(&atmel_uart, &port->uart);
2808 if (ret)
2809 goto err_add_port;
2810
2811 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2812 if (atmel_is_console_port(&port->uart)
2813 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2814 /*
2815 * The serial core enabled the clock for us, so undo
2816 * the clk_prepare_enable() in atmel_console_setup()
2817 */
2818 clk_disable_unprepare(port->clk);
2819 }
2820 #endif
2821
2822 device_init_wakeup(&pdev->dev, 1);
2823 platform_set_drvdata(pdev, port);
2824
2825 /*
2826 * The peripheral clock has been disabled by atmel_init_port():
2827 * enable it before accessing I/O registers
2828 */
2829 clk_prepare_enable(port->clk);
2830
2831 if (rs485_enabled) {
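/*
 * Until the port is opened and atmel_set_termios() programs RS485 mode,
 * keep the USART in normal mode with the RTS line forced by the RTSEN
 * command written below.
 */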
2832 atmel_uart_writel(&port->uart, ATMEL_US_MR, 2833 ATMEL_US_USMODE_NORMAL); 2834 atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN); 2835 } 2836 2837 /* 2838 * Get port name of usart or uart 2839 */ 2840 atmel_get_ip_name(&port->uart); 2841 2842 /* 2843 * The peripheral clock can now safely be disabled till the port 2844 * is used 2845 */ 2846 clk_disable_unprepare(port->clk); 2847 2848 return 0; 2849 2850 err_add_port: 2851 kfree(port->rx_ring.buf); 2852 port->rx_ring.buf = NULL; 2853 err_alloc_ring: 2854 if (!atmel_is_console_port(&port->uart)) { 2855 clk_put(port->clk); 2856 port->clk = NULL; 2857 } 2858 err_clear_bit: 2859 clear_bit(port->uart.line, atmel_ports_in_use); 2860 err: 2861 return ret; 2862 } 2863 2864 static int atmel_serial_remove(struct platform_device *pdev) 2865 { 2866 struct uart_port *port = platform_get_drvdata(pdev); 2867 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2868 int ret = 0; 2869 2870 tasklet_kill(&atmel_port->tasklet); 2871 2872 device_init_wakeup(&pdev->dev, 0); 2873 2874 ret = uart_remove_one_port(&atmel_uart, port); 2875 2876 kfree(atmel_port->rx_ring.buf); 2877 2878 /* "port" is allocated statically, so we shouldn't free it */ 2879 2880 clear_bit(port->line, atmel_ports_in_use); 2881 2882 clk_put(atmel_port->clk); 2883 2884 return ret; 2885 } 2886 2887 static struct platform_driver atmel_serial_driver = { 2888 .probe = atmel_serial_probe, 2889 .remove = atmel_serial_remove, 2890 .suspend = atmel_serial_suspend, 2891 .resume = atmel_serial_resume, 2892 .driver = { 2893 .name = "atmel_usart", 2894 .of_match_table = of_match_ptr(atmel_serial_dt_ids), 2895 }, 2896 }; 2897 2898 static int __init atmel_serial_init(void) 2899 { 2900 int ret; 2901 2902 ret = uart_register_driver(&atmel_uart); 2903 if (ret) 2904 return ret; 2905 2906 ret = platform_driver_register(&atmel_serial_driver); 2907 if (ret) 2908 uart_unregister_driver(&atmel_uart); 2909 2910 return ret; 2911 } 2912 2913 static void __exit atmel_serial_exit(void) 2914 { 2915 platform_driver_unregister(&atmel_serial_driver); 2916 uart_unregister_driver(&atmel_uart); 2917 } 2918 2919 module_init(atmel_serial_init); 2920 module_exit(atmel_serial_exit); 2921 2922 MODULE_AUTHOR("Rick Bronson"); 2923 MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver"); 2924 MODULE_LICENSE("GPL"); 2925 MODULE_ALIAS("platform:atmel_usart"); 2926