/*
 * Driver for Atmel AT91 / AT32 Serial ports
 * Copyright (C) 2003 Rick Bronson
 *
 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * DMA support added by Chip Coldwell.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/suspend.h>

#include <asm/io.h>
#include <asm/ioctls.h>

#define PDC_BUFFER_SIZE		512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */

/* The minimum amount of data the FIFOs should be able to contain */
#define ATMEL_MIN_FIFO_SIZE	8
/*
 * These two offsets are subtracted from the RX FIFO size to define the RTS
 * high and low thresholds
 */
#define ATMEL_RTS_HIGH_OFFSET	16
#define ATMEL_RTS_LOW_OFFSET	20
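
/*
 * Worked example (illustrative, assuming a 32-byte RX FIFO): RTS would be
 * deasserted once the FIFO holds 32 - 16 = 16 characters, and asserted
 * again when it drains below 32 - 20 = 12 characters.  See the
 * ATMEL_US_FRTSC handling in atmel_startup().
 */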

#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/serial_core.h>

#include "serial_mctrl_gpio.h"

static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);

#ifdef CONFIG_SERIAL_ATMEL_TTYAT

/* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
 * should coexist with the 8250 driver, such as if we have an external 16C550
 * UART. */
#define SERIAL_ATMEL_MAJOR	204
#define MINOR_START		154
#define ATMEL_DEVICENAME	"ttyAT"

#else

/* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
 * name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR	TTY_MAJOR
#define MINOR_START		64
#define ATMEL_DEVICENAME	"ttyS"

#endif

#define ATMEL_ISR_PASS_LIMIT	256

struct atmel_dma_buffer {
	unsigned char	*buf;
	dma_addr_t	dma_addr;
	unsigned int	dma_size;
	unsigned int	ofs;
};

struct atmel_uart_char {
	u16		status;
	u16		ch;
};

/*
 * Be careful, the real size of the ring buffer is
 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE.  It means that the ring
 * buffer can contain up to 1024 characters in PIO mode and up to 4096
 * characters in DMA mode.
 */
#define ATMEL_SERIAL_RINGSIZE 1024

/*
 * at91: 6 USARTs and one DBGU port (SAM9260)
 * avr32: 4
 */
#define ATMEL_MAX_UART		7

/*
 * We wrap our port structure around the generic uart_port.
 */
struct atmel_uart_port {
	struct uart_port	uart;		/* uart */
	struct clk		*clk;		/* uart clock */
	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
	u32			backup_imr;	/* IMR saved during suspend */
	int			break_active;	/* break being received */

	bool			use_dma_rx;	/* enable DMA receiver */
	bool			use_pdc_rx;	/* enable PDC receiver */
	short			pdc_rx_idx;	/* current PDC RX buffer */
	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */

	bool			use_dma_tx;	/* enable DMA transmitter */
	bool			use_pdc_tx;	/* enable PDC transmitter */
	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */

	spinlock_t		lock_tx;	/* port lock */
	spinlock_t		lock_rx;	/* port lock */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	dma_cookie_t		cookie_tx;
	dma_cookie_t		cookie_rx;
	struct scatterlist	sg_tx;
	struct scatterlist	sg_rx;
	struct tasklet_struct	tasklet_rx;
	struct tasklet_struct	tasklet_tx;
	atomic_t		tasklet_shutdown;
	unsigned int		irq_status_prev;
	unsigned int		tx_len;

	struct circ_buf		rx_ring;

	struct mctrl_gpios	*gpios;
	unsigned int		tx_done_mask;
	u32			fifo_size;
	u32			rts_high;
	u32			rts_low;
	bool			ms_irq_enabled;
	u32			rtor;	/* address of receiver timeout register if it exists */
	bool			has_frac_baudrate;
	bool			has_hw_timer;
	struct timer_list	uart_timer;

	bool			suspended;
	unsigned int		pending;
	unsigned int		pending_status;
	spinlock_t		lock_suspended;

	int (*prepare_rx)(struct uart_port *port);
	int (*prepare_tx)(struct uart_port *port);
	void (*schedule_rx)(struct uart_port *port);
	void (*schedule_tx)(struct uart_port *port);
	void (*release_rx)(struct uart_port *port);
	void (*release_tx)(struct uart_port *port);
};

static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);

#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif

#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-usart" },
	{ .compatible = "atmel,at91sam9260-usart" },
	{ /* sentinel */ }
};
#endif

static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}

static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}

static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}

#ifdef CONFIG_AVR32

/* AVR32 cannot handle 8- or 16-bit I/O accesses, only 32-bit I/O accesses */
static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readl(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writel(value, port->membase + ATMEL_US_THR);
}

#else

static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}

#endif

#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_rx;
}

static bool atmel_use_pdc_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_tx;
}
#else
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	return false;
}

static bool atmel_use_pdc_tx(struct uart_port *port)
{
	return false;
}
#endif

static bool atmel_use_dma_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_tx;
}

static bool atmel_use_dma_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_rx;
}

static bool atmel_use_fifo(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->fifo_size;
}

static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
				   struct tasklet_struct *t)
{
	if (!atomic_read(&atmel_port->tasklet_shutdown))
		tasklet_schedule(t);
}

static unsigned int atmel_get_lines_status(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, ret = 0;

	status = atmel_uart_readl(port, ATMEL_US_CSR);

	mctrl_gpio_get(atmel_port->gpios, &ret);

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_CTS))) {
		if (ret & TIOCM_CTS)
			status &= ~ATMEL_US_CTS;
		else
			status |= ATMEL_US_CTS;
	}

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_DSR))) {
		if (ret & TIOCM_DSR)
			status &= ~ATMEL_US_DSR;
		else
			status |= ATMEL_US_DSR;
	}

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_RI))) {
		if (ret & TIOCM_RI)
			status &= ~ATMEL_US_RI;
		else
			status |= ATMEL_US_RI;
	}

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_DCD))) {
		if (ret & TIOCM_CD)
			status &= ~ATMEL_US_DCD;
		else
			status |= ATMEL_US_DCD;
	}

	return status;
}

/* Enable or disable the rs485 support */
static int atmel_config_rs485(struct uart_port *port,
			      struct serial_rs485 *rs485conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	mode = atmel_uart_readl(port, ATMEL_US_MR);

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	port->rs485 = *rs485conf;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
		atmel_uart_writel(port, ATMEL_US_TTGR,
				  rs485conf->delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
		if (atmel_use_pdc_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
				ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}
	atmel_uart_writel(port, ATMEL_US_MR, mode);

	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	return 0;
}
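
/*
 * Illustrative usage (values are arbitrary examples): userspace normally
 * requests RS485 mode with the TIOCSRS485 ioctl, which the serial core
 * forwards to the rs485_config() callback above:
 *
 *	struct serial_rs485 rs485 = {
 *		.flags = SER_RS485_ENABLED,
 *		.delay_rts_after_send = 1,
 *	};
 *	ioctl(fd, TIOCSRS485, &rs485);
 */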

/*
 * Return TIOCSER_TEMT when transmitter FIFO and Shift register are empty.
 */
static u_int atmel_tx_empty(struct uart_port *port)
{
	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
		TIOCSER_TEMT :
		0;
}

/*
 * Set state of the modem control output lines
 */
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
	unsigned int control = 0;
	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
	unsigned int rts_paused, rts_ready;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* override mode to RS485 if needed, otherwise keep the current mode */
	if (port->rs485.flags & SER_RS485_ENABLED) {
		atmel_uart_writel(port, ATMEL_US_TTGR,
				  port->rs485.delay_rts_after_send);
		mode &= ~ATMEL_US_USMODE;
		mode |= ATMEL_US_USMODE_RS485;
	}

	/* set the RTS line state according to the mode */
	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
		/* force RTS line to high level */
		rts_paused = ATMEL_US_RTSEN;

		/* give the control of the RTS line back to the hardware */
		rts_ready = ATMEL_US_RTSDIS;
	} else {
		/* force RTS line to high level */
		rts_paused = ATMEL_US_RTSDIS;

		/* force RTS line to low level */
		rts_ready = ATMEL_US_RTSEN;
	}

	if (mctrl & TIOCM_RTS)
		control |= rts_ready;
	else
		control |= rts_paused;

	if (mctrl & TIOCM_DTR)
		control |= ATMEL_US_DTREN;
	else
		control |= ATMEL_US_DTRDIS;

	atmel_uart_writel(port, ATMEL_US_CR, control);

	mctrl_gpio_set(atmel_port->gpios, mctrl);

	/* Local loopback mode? */
	mode &= ~ATMEL_US_CHMODE;
	if (mctrl & TIOCM_LOOP)
		mode |= ATMEL_US_CHMODE_LOC_LOOP;
	else
		mode |= ATMEL_US_CHMODE_NORMAL;

	atmel_uart_writel(port, ATMEL_US_MR, mode);
}

/*
 * Get state of the modem control input lines
 */
static u_int atmel_get_mctrl(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int ret = 0, status;

	status = atmel_uart_readl(port, ATMEL_US_CSR);

	/*
	 * The control signals are active low.
	 */
	if (!(status & ATMEL_US_DCD))
		ret |= TIOCM_CD;
	if (!(status & ATMEL_US_CTS))
		ret |= TIOCM_CTS;
	if (!(status & ATMEL_US_DSR))
		ret |= TIOCM_DSR;
	if (!(status & ATMEL_US_RI))
		ret |= TIOCM_RI;

	return mctrl_gpio_get(atmel_port->gpios, &ret);
}

/*
 * Stop transmitting.
 */
static void atmel_stop_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port)) {
		/* disable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
	}

	/*
	 * Disable the transmitter.
	 * This is mandatory when DMA is used, otherwise the DMA buffer
	 * is fully transmitted.
	 */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	if ((port->rs485.flags & SER_RS485_ENABLED) &&
	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
		atmel_start_rx(port);
}

/*
 * Start transmitting.
 */
static void atmel_start_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
				       & ATMEL_PDC_TXTEN))
		/* The transmitter is already running.  Yes, we
		   really need this.*/
		return;

	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
		if ((port->rs485.flags & SER_RS485_ENABLED) &&
		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
			atmel_stop_rx(port);

	if (atmel_use_pdc_tx(port))
		/* re-enable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);

	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	/* re-enable the transmitter */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
}

/*
 * start receiving - port is in process of being opened.
 */
static void atmel_start_rx(struct uart_port *port)
{
	/* reset status and receiver */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);

	if (atmel_use_pdc_rx(port)) {
		/* enable PDC controller */
		atmel_uart_writel(port, ATMEL_US_IER,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	} else {
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
	}
}

/*
 * Stop receiving - port is in process of being closed.
 */
static void atmel_stop_rx(struct uart_port *port)
{
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);

	if (atmel_use_pdc_rx(port)) {
		/* disable PDC receive */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
		atmel_uart_writel(port, ATMEL_US_IDR,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
	} else {
		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
	}
}

/*
 * Enable modem status interrupts
 */
static void atmel_enable_ms(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	uint32_t ier = 0;

	/*
	 * Interrupt should not be enabled twice
	 */
	if (atmel_port->ms_irq_enabled)
		return;

	atmel_port->ms_irq_enabled = true;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
		ier |= ATMEL_US_CTSIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
		ier |= ATMEL_US_DSRIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
		ier |= ATMEL_US_RIIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
		ier |= ATMEL_US_DCDIC;

	atmel_uart_writel(port, ATMEL_US_IER, ier);

	mctrl_gpio_enable_ms(atmel_port->gpios);
}

/*
 * Disable modem status interrupts
 */
static void atmel_disable_ms(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	uint32_t idr = 0;

	/*
	 * Interrupt should not be disabled twice
	 */
	if (!atmel_port->ms_irq_enabled)
		return;

	atmel_port->ms_irq_enabled = false;

	mctrl_gpio_disable_ms(atmel_port->gpios);

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
		idr |= ATMEL_US_CTSIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
		idr |= ATMEL_US_DSRIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
		idr |= ATMEL_US_RIIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
		idr |= ATMEL_US_DCDIC;

	atmel_uart_writel(port, ATMEL_US_IDR, idr);
}

/*
 * Control the transmission of a break signal
 */
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
	if (break_state != 0)
		/* start break */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
	else
		/* stop break */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
}

/*
 * Stores the incoming character in the ring buffer
 */
static void
atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
		     unsigned int ch)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct atmel_uart_char *c;

	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
		/* Buffer overflow, ignore char */
		return;

	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
	c->status	= status;
	c->ch		= ch;

	/* Make sure the character is stored before we update head. */
	smp_wmb();

	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
}
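
/*
 * The smp_wmb() above pairs with the smp_rmb() in atmel_rx_from_ring(),
 * which consumes the ring buffer from the RX tasklet.
 */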

/*
 * Deal with parity, framing and overrun errors.
 */
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
	/* clear error */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	if (status & ATMEL_US_RXBRK) {
		/* ignore side-effect */
		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
		port->icount.brk++;
	}
	if (status & ATMEL_US_PARE)
		port->icount.parity++;
	if (status & ATMEL_US_FRAME)
		port->icount.frame++;
	if (status & ATMEL_US_OVRE)
		port->icount.overrun++;
}

/*
 * Characters received (called from interrupt handler)
 */
static void atmel_rx_chars(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, ch;

	status = atmel_uart_readl(port, ATMEL_US_CSR);
	while (status & ATMEL_US_RXRDY) {
		ch = atmel_uart_read_char(port);

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
			     || atmel_port->break_active)) {

			/* clear error */
			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

			if (status & ATMEL_US_RXBRK
			    && !atmel_port->break_active) {
				atmel_port->break_active = 1;
				atmel_uart_writel(port, ATMEL_US_IER,
						  ATMEL_US_RXBRK);
			} else {
				/*
				 * This is either the end-of-break
				 * condition or we've received at
				 * least one character without RXBRK
				 * being set. In both cases, the next
				 * RXBRK will indicate start-of-break.
				 */
				atmel_uart_writel(port, ATMEL_US_IDR,
						  ATMEL_US_RXBRK);
				status &= ~ATMEL_US_RXBRK;
				atmel_port->break_active = 0;
			}
		}

		atmel_buffer_rx_char(port, status, ch);
		status = atmel_uart_readl(port, ATMEL_US_CSR);
	}

	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}

/*
 * Transmit characters (called from tasklet with TXRDY interrupt
 * disabled)
 */
static void atmel_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (port->x_char &&
	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
		atmel_uart_write_char(port, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		return;

	while (atmel_uart_readl(port, ATMEL_US_CSR) &
	       atmel_port->tx_done_mask) {
		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit))
		/* Enable interrupts */
		atmel_uart_writel(port, ATMEL_US_IER,
				  atmel_port->tx_done_mask);
}

static void atmel_complete_tx_dma(void *arg)
{
	struct atmel_uart_port *atmel_port = arg;
	struct uart_port *port = &atmel_port->uart;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_chan *chan = atmel_port->chan_tx;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	if (chan)
		dmaengine_terminate_all(chan);
	xmit->tail += atmel_port->tx_len;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += atmel_port->tx_len;

	spin_lock_irq(&atmel_port->lock_tx);
	async_tx_ack(atmel_port->desc_tx);
	atmel_port->cookie_tx = -EINVAL;
	atmel_port->desc_tx = NULL;
	spin_unlock_irq(&atmel_port->lock_tx);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	/*
	 * xmit is a circular buffer so, if we have just sent data from
	 * xmit->tail to the end of xmit->buf, now we have to transmit the
	 * remaining data from the beginning of xmit->buf to xmit->head.
	 */
	if (!uart_circ_empty(xmit))
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
	else if ((port->rs485.flags & SER_RS485_ENABLED) &&
		 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
		/* DMA done, stop TX, start RX for RS485 */
		atmel_start_rx(port);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}

static void atmel_release_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_tx;

	if (chan) {
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
			     DMA_TO_DEVICE);
	}

	atmel_port->desc_tx = NULL;
	atmel_port->chan_tx = NULL;
	atmel_port->cookie_tx = -EINVAL;
}

/*
 * Called from tasklet with TXRDY interrupt disabled.
 */
static void atmel_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_chan *chan = atmel_port->chan_tx;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
	unsigned int tx_len, part1_len, part2_len, sg_len;
	dma_addr_t phys_addr;

	/* Make sure we have an idle channel */
	if (atmel_port->desc_tx != NULL)
		return;

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		/*
		 * DMA is idle now.
		 * Port xmit buffer is already mapped,
		 * and it is one page... Just adjust
		 * offsets and lengths. Since it is a circular buffer,
		 * we have to transmit till the end, and then the rest.
		 * Take the port lock to get a
		 * consistent xmit buffer state.
		 */
		tx_len = CIRC_CNT_TO_END(xmit->head,
					 xmit->tail,
					 UART_XMIT_SIZE);

		if (atmel_port->fifo_size) {
			/* multi data mode */
			part1_len = (tx_len & ~0x3); /* DWORD access */
			part2_len = (tx_len & 0x3); /* BYTE access */
		} else {
			/* single data (legacy) mode */
			part1_len = 0;
			part2_len = tx_len; /* BYTE access only */
		}
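
		/*
		 * Illustrative example: with tx_len = 10, the multi data
		 * (FIFO) case yields part1_len = 8 (transferred as two
		 * 32-bit THR writes) and part2_len = 2 (two single-byte
		 * writes), while the legacy case sends all 10 bytes one
		 * at a time.
		 */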

		sg_init_table(sgl, 2);
		sg_len = 0;
		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
		if (part1_len) {
			sg = &sgl[sg_len++];
			sg_dma_address(sg) = phys_addr;
			sg_dma_len(sg) = part1_len;

			phys_addr += part1_len;
		}

		if (part2_len) {
			sg = &sgl[sg_len++];
			sg_dma_address(sg) = phys_addr;
			sg_dma_len(sg) = part2_len;
		}

		/*
		 * save tx_len so atmel_complete_tx_dma() will increase
		 * xmit->tail correctly
		 */
		atmel_port->tx_len = tx_len;

		desc = dmaengine_prep_slave_sg(chan,
					       sgl,
					       sg_len,
					       DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
		if (!desc) {
			dev_err(port->dev, "Failed to send via dma!\n");
			return;
		}

		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);

		atmel_port->desc_tx = desc;
		desc->callback = atmel_complete_tx_dma;
		desc->callback_param = atmel_port;
		atmel_port->cookie_tx = dmaengine_submit(desc);
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}

static int atmel_prepare_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	int ret, nent;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
	if (atmel_port->chan_tx == NULL)
		goto chan_err;
	dev_info(port->dev, "using %s for tx DMA transfers\n",
		 dma_chan_name(atmel_port->chan_tx));

	spin_lock_init(&atmel_port->lock_tx);
	sg_init_table(&atmel_port->sg_tx, 1);
	/* UART circular tx buffer is an aligned page. */
	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
	sg_set_page(&atmel_port->sg_tx,
		    virt_to_page(port->state->xmit.buf),
		    UART_XMIT_SIZE,
		    (unsigned long)port->state->xmit.buf & ~PAGE_MASK);
	nent = dma_map_sg(port->dev,
			  &atmel_port->sg_tx,
			  1,
			  DMA_TO_DEVICE);

	if (!nent) {
		dev_dbg(port->dev, "need to release resource of dma\n");
		goto chan_err;
	} else {
		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
			sg_dma_len(&atmel_port->sg_tx),
			port->state->xmit.buf,
			&sg_dma_address(&atmel_port->sg_tx));
	}

	/* Configure the slave DMA */
	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr_width = (atmel_port->fifo_size) ?
				DMA_SLAVE_BUSWIDTH_4_BYTES :
				DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr = port->mapbase + ATMEL_US_THR;
	config.dst_maxburst = 1;

	ret = dmaengine_slave_config(atmel_port->chan_tx,
				     &config);
	if (ret) {
		dev_err(port->dev, "DMA tx slave configuration failed\n");
		goto chan_err;
	}

	return 0;

chan_err:
	dev_err(port->dev, "TX channel not available, switch to pio\n");
	atmel_port->use_dma_tx = 0;
	if (atmel_port->chan_tx)
		atmel_release_tx_dma(port);
	return -EINVAL;
}

static void atmel_complete_rx_dma(void *arg)
{
	struct uart_port *port = arg;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}

static void atmel_release_rx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_rx;

	if (chan) {
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
			     DMA_FROM_DEVICE);
	}

	atmel_port->desc_rx = NULL;
	atmel_port->chan_rx = NULL;
	atmel_port->cookie_rx = -EINVAL;
}

static void atmel_rx_from_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_port *tport = &port->state->port;
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct dma_chan *chan = atmel_port->chan_rx;
	struct dma_tx_state state;
	enum dma_status dmastat;
	size_t count;


	/* Reset the UART timeout early so that we don't miss one */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
	dmastat = dmaengine_tx_status(chan,
				      atmel_port->cookie_rx,
				      &state);
	/* Restart a new tasklet if DMA status is error */
	if (dmastat == DMA_ERROR) {
		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
		return;
	}

	/* CPU claims ownership of RX DMA buffer */
	dma_sync_sg_for_cpu(port->dev,
			    &atmel_port->sg_rx,
			    1,
			    DMA_FROM_DEVICE);

	/*
	 * ring->head points to the end of data already written by the DMA.
	 * ring->tail points to the beginning of data to be read by the
	 * framework.
	 * The current transfer size should not be larger than the dma buffer
	 * length.
	 */
	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
	/*
	 * At this point ring->head may point to the first byte right after the
	 * last byte of the dma buffer:
	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
	 *
	 * However ring->tail must always point inside the dma buffer:
	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
	 *
	 * Since we use a ring buffer, we have to handle the case
	 * where head is lower than tail. In such a case, we first read from
	 * tail to the end of the buffer then reset tail.
	 */
	if (ring->head < ring->tail) {
		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		ring->tail = 0;
		port->icount.rx += count;
	}

	/* Finally we read data from tail to head */
	if (ring->tail < ring->head) {
		count = ring->head - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		/* Wrap ring->head if needed */
		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
			ring->head = 0;
		ring->tail = ring->head;
		port->icount.rx += count;
	}

	/* USART retrieves ownership of RX DMA buffer */
	dma_sync_sg_for_device(port->dev,
			       &atmel_port->sg_rx,
			       1,
			       DMA_FROM_DEVICE);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}

static int atmel_prepare_rx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_async_tx_descriptor *desc;
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct circ_buf *ring;
	int ret, nent;

	ring = &atmel_port->rx_ring;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
	if (atmel_port->chan_rx == NULL)
		goto chan_err;
	dev_info(port->dev, "using %s for rx DMA transfers\n",
		 dma_chan_name(atmel_port->chan_rx));

	spin_lock_init(&atmel_port->lock_rx);
	sg_init_table(&atmel_port->sg_rx, 1);
	/* UART circular rx buffer is an aligned page. */
	BUG_ON(!PAGE_ALIGNED(ring->buf));
	sg_set_page(&atmel_port->sg_rx,
		    virt_to_page(ring->buf),
		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
		    (unsigned long)ring->buf & ~PAGE_MASK);
	nent = dma_map_sg(port->dev,
			  &atmel_port->sg_rx,
			  1,
			  DMA_FROM_DEVICE);

	if (!nent) {
		dev_dbg(port->dev, "need to release resource of dma\n");
		goto chan_err;
	} else {
		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
			sg_dma_len(&atmel_port->sg_rx),
			ring->buf,
			&sg_dma_address(&atmel_port->sg_rx));
	}

	/* Configure the slave DMA */
	memset(&config, 0, sizeof(config));
	config.direction = DMA_DEV_TO_MEM;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = port->mapbase + ATMEL_US_RHR;
	config.src_maxburst = 1;

	ret = dmaengine_slave_config(atmel_port->chan_rx,
				     &config);
	if (ret) {
		dev_err(port->dev, "DMA rx slave configuration failed\n");
		goto chan_err;
	}
	/*
	 * Prepare a cyclic dma transfer, assign 2 descriptors,
	 * each one is half ring buffer size
	 */
	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
					 sg_dma_address(&atmel_port->sg_rx),
					 sg_dma_len(&atmel_port->sg_rx),
					 sg_dma_len(&atmel_port->sg_rx)/2,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	desc->callback = atmel_complete_rx_dma;
	desc->callback_param = port;
	atmel_port->desc_rx = desc;
	atmel_port->cookie_rx = dmaengine_submit(desc);

	return 0;

chan_err:
	dev_err(port->dev, "RX channel not available, switch to pio\n");
	atmel_port->use_dma_rx = 0;
	if (atmel_port->chan_rx)
		atmel_release_rx_dma(port);
	return -EINVAL;
}
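
/*
 * Sizing note (derived from the definitions above): the RX DMA buffer reuses
 * rx_ring.buf, i.e. sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE =
 * 4 * 1024 = 4096 bytes, so the cyclic transfer prepared in
 * atmel_prepare_rx_dma() completes a period (and runs its callback) every
 * 2048 bytes received.
 */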

static void atmel_uart_timer_callback(unsigned long data)
{
	struct uart_port *port = (void *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
		tasklet_schedule(&atmel_port->tasklet_rx);
		mod_timer(&atmel_port->uart_timer,
			  jiffies + uart_poll_timeout(port));
	}
}

/*
 * receive interrupt handler.
 */
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_rx(port)) {
		/*
		 * PDC receive. Just schedule the tasklet and let it
		 * figure out the details.
		 *
		 * TODO: We're not handling error flags correctly at
		 * the moment.
		 */
		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
			atmel_uart_writel(port, ATMEL_US_IDR,
					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
			atmel_tasklet_schedule(atmel_port,
					       &atmel_port->tasklet_rx);
		}

		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
			       ATMEL_US_FRAME | ATMEL_US_PARE))
			atmel_pdc_rxerr(port, pending);
	}

	if (atmel_use_dma_rx(port)) {
		if (pending & ATMEL_US_TIMEOUT) {
			atmel_uart_writel(port, ATMEL_US_IDR,
					  ATMEL_US_TIMEOUT);
			atmel_tasklet_schedule(atmel_port,
					       &atmel_port->tasklet_rx);
		}
	}

	/* Interrupt receive */
	if (pending & ATMEL_US_RXRDY)
		atmel_rx_chars(port);
	else if (pending & ATMEL_US_RXBRK) {
		/*
		 * End of break detected. If it came along with a
		 * character, atmel_rx_chars will handle it.
		 */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
		atmel_port->break_active = 0;
	}
}

/*
 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
 */
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (pending & atmel_port->tx_done_mask) {
		/* Either PDC or interrupt transmission */
		atmel_uart_writel(port, ATMEL_US_IDR,
				  atmel_port->tx_done_mask);
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
	}
}

/*
 * status flags interrupt handler.
 */
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
		    unsigned int status)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status_change;

	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
		       | ATMEL_US_CTSIC)) {
		status_change = status ^ atmel_port->irq_status_prev;
		atmel_port->irq_status_prev = status;

		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
				     | ATMEL_US_DCD | ATMEL_US_CTS)) {
			/* TODO: All reads to CSR will clear these interrupts! */
			if (status_change & ATMEL_US_RI)
				port->icount.rng++;
			if (status_change & ATMEL_US_DSR)
				port->icount.dsr++;
			if (status_change & ATMEL_US_DCD)
				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
			if (status_change & ATMEL_US_CTS)
				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));

			wake_up_interruptible(&port->state->port.delta_msr_wait);
		}
	}
}

/*
 * Interrupt handler
 */
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, pending, mask, pass_counter = 0;

	spin_lock(&atmel_port->lock_suspended);

	do {
		status = atmel_get_lines_status(port);
		mask = atmel_uart_readl(port, ATMEL_US_IMR);
		pending = status & mask;
		if (!pending)
			break;

		if (atmel_port->suspended) {
			atmel_port->pending |= pending;
			atmel_port->pending_status = status;
			atmel_uart_writel(port, ATMEL_US_IDR, mask);
			pm_system_wakeup();
			break;
		}

		atmel_handle_receive(port, pending);
		atmel_handle_status(port, pending, status);
		atmel_handle_transmit(port, pending);
	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);

	spin_unlock(&atmel_port->lock_suspended);

	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}

static void atmel_release_tx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;

	dma_unmap_single(port->dev,
			 pdc->dma_addr,
			 pdc->dma_size,
			 DMA_TO_DEVICE);
}

/*
 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
 */
static void atmel_tx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
	int count;

	/* nothing left to transmit? */
	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
		return;

	xmit->tail += pdc->ofs;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += pdc->ofs;
	pdc->ofs = 0;

	/* more to transmit - setup next transfer */

	/* disable PDC transmit */
	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		dma_sync_single_for_device(port->dev,
					   pdc->dma_addr,
					   pdc->dma_size,
					   DMA_TO_DEVICE);

		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
		pdc->ofs = count;

		atmel_uart_writel(port, ATMEL_PDC_TPR,
				  pdc->dma_addr + xmit->tail);
		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
		/* re-enable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
		/* Enable interrupts */
		atmel_uart_writel(port, ATMEL_US_IER,
				  atmel_port->tx_done_mask);
	} else {
		if ((port->rs485.flags & SER_RS485_ENABLED) &&
		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
			/* DMA done, stop TX, start RX for RS485 */
			atmel_start_rx(port);
		}
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}

static int atmel_prepare_tx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
	struct circ_buf *xmit = &port->state->xmit;

	pdc->buf = xmit->buf;
	pdc->dma_addr = dma_map_single(port->dev,
				       pdc->buf,
				       UART_XMIT_SIZE,
				       DMA_TO_DEVICE);
	pdc->dma_size = UART_XMIT_SIZE;
	pdc->ofs = 0;

	return 0;
}

static void atmel_rx_from_ring(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	unsigned int flg;
	unsigned int status;

	while (ring->head != ring->tail) {
		struct atmel_uart_char c;

		/* Make sure c is loaded after head. */
		smp_rmb();

		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];

		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);

		port->icount.rx++;
		status = c.status;
		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
			if (status & ATMEL_US_RXBRK) {
				/* ignore side-effect */
				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);

				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}
			if (status & ATMEL_US_PARE)
				port->icount.parity++;
			if (status & ATMEL_US_FRAME)
				port->icount.frame++;
			if (status & ATMEL_US_OVRE)
				port->icount.overrun++;

			status &= port->read_status_mask;

			if (status & ATMEL_US_RXBRK)
				flg = TTY_BREAK;
			else if (status & ATMEL_US_PARE)
				flg = TTY_PARITY;
			else if (status & ATMEL_US_FRAME)
				flg = TTY_FRAME;
		}


		if (uart_handle_sysrq_char(port, c.ch))
			continue;

		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
	}

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&port->state->port);
	spin_lock(&port->lock);
}

static void atmel_release_rx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int i;

	for (i = 0; i < 2; i++) {
		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];

		dma_unmap_single(port->dev,
				 pdc->dma_addr,
				 pdc->dma_size,
				 DMA_FROM_DEVICE);
		kfree(pdc->buf);
	}
}

static void atmel_rx_from_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_port *tport = &port->state->port;
	struct atmel_dma_buffer *pdc;
	int rx_idx = atmel_port->pdc_rx_idx;
	unsigned int head;
	unsigned int tail;
	unsigned int count;

	do {
		/* Reset the UART timeout early so that we don't miss one */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);

		pdc = &atmel_port->pdc_rx[rx_idx];
		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
		tail = pdc->ofs;

		/* If the PDC has switched buffers, RPR won't contain
		 * any address within the current buffer. Since head
		 * is unsigned, we just need a one-way comparison to
		 * find out.
		 *
		 * In this case, we just need to consume the entire
		 * buffer and resubmit it for DMA. This will clear the
		 * ENDRX bit as well, so that we can safely re-enable
		 * all interrupts below.
		 */
		head = min(head, pdc->dma_size);

		if (likely(head != tail)) {
			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);

			/*
			 * head will only wrap around when we recycle
			 * the DMA buffer, and when that happens, we
			 * explicitly set tail to 0. So head will
			 * always be greater than tail.
			 */
			count = head - tail;

			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
					       count);

			dma_sync_single_for_device(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);

			port->icount.rx += count;
			pdc->ofs = head;
		}

		/*
		 * If the current buffer is full, we need to check if
		 * the next one contains any additional data.
		 */
		if (head >= pdc->dma_size) {
			pdc->ofs = 0;
			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);

			rx_idx = !rx_idx;
			atmel_port->pdc_rx_idx = rx_idx;
		}
	} while (head >= pdc->dma_size);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	atmel_uart_writel(port, ATMEL_US_IER,
			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}

static int atmel_prepare_rx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int i;

	for (i = 0; i < 2; i++) {
		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];

		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
		if (pdc->buf == NULL) {
			if (i != 0) {
				dma_unmap_single(port->dev,
						 atmel_port->pdc_rx[0].dma_addr,
						 PDC_BUFFER_SIZE,
						 DMA_FROM_DEVICE);
				kfree(atmel_port->pdc_rx[0].buf);
			}
			atmel_port->use_pdc_rx = 0;
			return -ENOMEM;
		}
		pdc->dma_addr = dma_map_single(port->dev,
					       pdc->buf,
					       PDC_BUFFER_SIZE,
					       DMA_FROM_DEVICE);
		pdc->dma_size = PDC_BUFFER_SIZE;
		pdc->ofs = 0;
	}

	atmel_port->pdc_rx_idx = 0;

	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);

	atmel_uart_writel(port, ATMEL_PDC_RNPR,
			  atmel_port->pdc_rx[1].dma_addr);
	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);

	return 0;
}

/*
 * tasklet handling tty stuff outside the interrupt handler.
 */
static void atmel_tasklet_rx_func(unsigned long data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* The interrupt handler does not take the lock */
	spin_lock(&port->lock);
	atmel_port->schedule_rx(port);
	spin_unlock(&port->lock);
}

static void atmel_tasklet_tx_func(unsigned long data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* The interrupt handler does not take the lock */
	spin_lock(&port->lock);
	atmel_port->schedule_tx(port);
	spin_unlock(&port->lock);
}

static void atmel_init_property(struct atmel_uart_port *atmel_port,
				struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);

	if (np) {
		/* DMA/PDC usage specification */
		if (of_property_read_bool(np, "atmel,use-dma-rx")) {
			if (of_property_read_bool(np, "dmas")) {
				atmel_port->use_dma_rx = true;
				atmel_port->use_pdc_rx = false;
			} else {
				atmel_port->use_dma_rx = false;
				atmel_port->use_pdc_rx = true;
			}
		} else {
			atmel_port->use_dma_rx = false;
			atmel_port->use_pdc_rx = false;
		}

		if (of_property_read_bool(np, "atmel,use-dma-tx")) {
			if (of_property_read_bool(np, "dmas")) {
				atmel_port->use_dma_tx = true;
				atmel_port->use_pdc_tx = false;
			} else {
				atmel_port->use_dma_tx = false;
				atmel_port->use_pdc_tx = true;
			}
		} else {
			atmel_port->use_dma_tx = false;
			atmel_port->use_pdc_tx = false;
		}

	} else {
		atmel_port->use_pdc_rx = pdata->use_dma_rx;
		atmel_port->use_pdc_tx = pdata->use_dma_tx;
		atmel_port->use_dma_rx = false;
		atmel_port->use_dma_tx = false;
	}

}
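
/*
 * Illustrative device-tree fragment (not taken from a real board file)
 * matching the properties parsed above: "atmel,use-dma-rx"/"atmel,use-dma-tx"
 * request DMA, and the presence of "dmas" decides between the DMA engine and
 * the legacy PDC:
 *
 *	usart1: serial@f8020000 {
 *		compatible = "atmel,at91sam9260-usart";
 *		atmel,use-dma-rx;
 *		atmel,use-dma-tx;
 *	};
 */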

static void atmel_init_rs485(struct uart_port *port,
			     struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);

	if (np) {
		struct serial_rs485 *rs485conf = &port->rs485;
		u32 rs485_delay[2];
		/* rs485 properties */
		if (of_property_read_u32_array(np, "rs485-rts-delay",
					       rs485_delay, 2) == 0) {
			rs485conf->delay_rts_before_send = rs485_delay[0];
			rs485conf->delay_rts_after_send = rs485_delay[1];
			rs485conf->flags = 0;
		}

		if (of_get_property(np, "rs485-rx-during-tx", NULL))
			rs485conf->flags |= SER_RS485_RX_DURING_TX;

		if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
				    NULL))
			rs485conf->flags |= SER_RS485_ENABLED;
	} else {
		port->rs485 = pdata->rs485;
	}

}

static void atmel_set_ops(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_rx(port)) {
		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
		atmel_port->schedule_rx = &atmel_rx_from_dma;
		atmel_port->release_rx = &atmel_release_rx_dma;
	} else if (atmel_use_pdc_rx(port)) {
		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
		atmel_port->schedule_rx = &atmel_rx_from_pdc;
		atmel_port->release_rx = &atmel_release_rx_pdc;
	} else {
		atmel_port->prepare_rx = NULL;
		atmel_port->schedule_rx = &atmel_rx_from_ring;
		atmel_port->release_rx = NULL;
	}

	if (atmel_use_dma_tx(port)) {
		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
		atmel_port->schedule_tx = &atmel_tx_dma;
		atmel_port->release_tx = &atmel_release_tx_dma;
	} else if (atmel_use_pdc_tx(port)) {
		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
		atmel_port->schedule_tx = &atmel_tx_pdc;
		atmel_port->release_tx = &atmel_release_tx_pdc;
	} else {
		atmel_port->prepare_tx = NULL;
		atmel_port->schedule_tx = &atmel_tx_chars;
		atmel_port->release_tx = NULL;
	}
}

/*
 * Get IP name (usart or uart)
 */
static void atmel_get_ip_name(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int name = atmel_uart_readl(port, ATMEL_US_NAME);
	u32 version;
	u32 usart, dbgu_uart, new_uart;
	/* ASCII decoding for IP version */
	usart = 0x55534152;	/* USAR(T) */
	dbgu_uart = 0x44424755;	/* DBGU */
	new_uart = 0x55415254;	/* UART */
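
	/*
	 * For reference, these constants are simply the IP name's ASCII
	 * characters packed into a 32-bit word, e.g. 0x55534152 decodes to
	 * 'U' (0x55), 'S' (0x53), 'A' (0x41), 'R' (0x52).
	 */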

	/*
	 * Only USART devices from at91sam9260 SOC implement fractional
	 * baudrate.
	 */
	atmel_port->has_frac_baudrate = false;
	atmel_port->has_hw_timer = false;

	if (name == new_uart) {
		dev_dbg(port->dev, "Uart with hw timer");
		atmel_port->has_hw_timer = true;
		atmel_port->rtor = ATMEL_UA_RTOR;
	} else if (name == usart) {
		dev_dbg(port->dev, "Usart\n");
		atmel_port->has_frac_baudrate = true;
		atmel_port->has_hw_timer = true;
		atmel_port->rtor = ATMEL_US_RTOR;
	} else if (name == dbgu_uart) {
		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
	} else {
		/* fallback for older SoCs: use version field */
		version = atmel_uart_readl(port, ATMEL_US_VERSION);
		switch (version) {
		case 0x302:
		case 0x10213:
			dev_dbg(port->dev, "This version is usart\n");
			atmel_port->has_frac_baudrate = true;
			atmel_port->has_hw_timer = true;
			atmel_port->rtor = ATMEL_US_RTOR;
			break;
		case 0x203:
		case 0x10202:
			dev_dbg(port->dev, "This version is uart\n");
			break;
		default:
			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
		}
	}
}

/*
 * Perform initialization and enable port for reception
 */
static int atmel_startup(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int retval;

	/*
	 * Ensure that no interrupts are enabled otherwise when
	 * request_irq() is called we could get stuck trying to
	 * handle an unexpected interrupt
	 */
	atmel_uart_writel(port, ATMEL_US_IDR, -1);
	atmel_port->ms_irq_enabled = false;

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(port->irq, atmel_interrupt,
			     IRQF_SHARED | IRQF_COND_SUSPEND,
			     tty ? tty->name : "atmel_serial", port);
	if (retval) {
		dev_err(port->dev, "atmel_startup - Can't get irq\n");
		return retval;
	}

	atomic_set(&atmel_port->tasklet_shutdown, 0);
	tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
		     (unsigned long)port);
	tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
		     (unsigned long)port);

	/*
	 * Initialize DMA (if necessary)
	 */
	atmel_init_property(atmel_port, pdev);
	atmel_set_ops(port);

	if (atmel_port->prepare_rx) {
		retval = atmel_port->prepare_rx(port);
		if (retval < 0)
			atmel_set_ops(port);
	}

	if (atmel_port->prepare_tx) {
		retval = atmel_port->prepare_tx(port);
		if (retval < 0)
			atmel_set_ops(port);
	}

	/*
	 * Enable FIFO when available
	 */
	if (atmel_port->fifo_size) {
		unsigned int txrdym = ATMEL_US_ONE_DATA;
		unsigned int rxrdym = ATMEL_US_ONE_DATA;
		unsigned int fmr;

		atmel_uart_writel(port, ATMEL_US_CR,
				  ATMEL_US_FIFOEN |
				  ATMEL_US_RXFCLR |
				  ATMEL_US_TXFLCLR);

		if (atmel_use_dma_tx(port))
			txrdym = ATMEL_US_FOUR_DATA;

		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
		if (atmel_port->rts_high &&
		    atmel_port->rts_low)
			fmr |=	ATMEL_US_FRTSC |
				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
				ATMEL_US_RXFTHRES2(atmel_port->rts_low);

		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
	}

	/* Save current CSR for comparison in atmel_handle_status() */
	atmel_port->irq_status_prev = atmel_get_lines_status(port);

	/*
	 * Finally, enable the serial port
	 */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	/* enable xmit & rcvr */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);

	setup_timer(&atmel_port->uart_timer,
		    atmel_uart_timer_callback,
		    (unsigned long)port);

	if (atmel_use_pdc_rx(port)) {
		/* set UART timeout */
		if (!atmel_port->has_hw_timer) {
			mod_timer(&atmel_port->uart_timer,
				  jiffies + uart_poll_timeout(port));
		/* set USART timeout */
		} else {
			atmel_uart_writel(port, atmel_port->rtor,
					  PDC_RX_TIMEOUT);
			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);

			atmel_uart_writel(port, ATMEL_US_IER,
					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
		}
		/* enable PDC controller */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	} else if (atmel_use_dma_rx(port)) {
		/* set UART timeout */
		if (!atmel_port->has_hw_timer) {
			mod_timer(&atmel_port->uart_timer,
				  jiffies + uart_poll_timeout(port));
		/* set USART timeout */
		} else {
			atmel_uart_writel(port, atmel_port->rtor,
					  PDC_RX_TIMEOUT);
			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);

			atmel_uart_writel(port, ATMEL_US_IER,
					  ATMEL_US_TIMEOUT);
		}
	} else {
		/* enable receive only */
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
	}

	return 0;
}

/*
 * Flush any TX data submitted for DMA. Called when the TX circular
 * buffer is reset.
 */
static void atmel_flush_buffer(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port)) {
		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
		atmel_port->pdc_tx.ofs = 0;
	}
}

/*
 * Disable the port
 */
static void atmel_shutdown(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* Disable modem control lines interrupts */
	atmel_disable_ms(port);

	/* Disable interrupts at device level */
	atmel_uart_writel(port, ATMEL_US_IDR, -1);

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&atmel_port->tasklet_shutdown);

	/*
	 * Prevent any tasklets being scheduled during
	 * cleanup
	 */
	del_timer_sync(&atmel_port->uart_timer);

	/* Make sure that no interrupt is on the fly */
	synchronize_irq(port->irq);

	/*
	 * Clear out any scheduled tasklets before
	 * we destroy the buffers
	 */
	tasklet_kill(&atmel_port->tasklet_rx);
	tasklet_kill(&atmel_port->tasklet_tx);

	/*
	 * Ensure everything is stopped and
	 * disable port and break condition.
	 */
	atmel_stop_rx(port);
	atmel_stop_tx(port);

	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	/*
	 * Shut-down the DMA.
	 */
	if (atmel_port->release_rx)
		atmel_port->release_rx(port);
	if (atmel_port->release_tx)
		atmel_port->release_tx(port);

	/*
	 * Reset ring buffer pointers
	 */
	atmel_port->rx_ring.head = 0;
	atmel_port->rx_ring.tail = 0;

	/*
	 * Free the interrupts
	 */
	free_irq(port->irq, port);

	atmel_flush_buffer(port);
}

/*
 * Power / Clock management.
 */
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
			    unsigned int oldstate)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	switch (state) {
	case 0:
		/*
		 * Enable the peripheral clock for this serial port.
		 * This is called on uart_open() or a resume event.
		 */
		clk_prepare_enable(atmel_port->clk);

		/* re-enable interrupts if we disabled some on suspend */
		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
		break;
	case 3:
		/* Back up the interrupt mask and disable all interrupts */
		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
		atmel_uart_writel(port, ATMEL_US_IDR, -1);

		/*
		 * Disable the peripheral clock for this serial port.
		 * This is called on uart_close() or a suspend event.
2033 */ 2034 clk_disable_unprepare(atmel_port->clk); 2035 break; 2036 default: 2037 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); 2038 } 2039 } 2040 2041 /* 2042 * Change the port parameters 2043 */ 2044 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, 2045 struct ktermios *old) 2046 { 2047 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2048 unsigned long flags; 2049 unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0; 2050 2051 /* save the current mode register */ 2052 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR); 2053 2054 /* reset the mode, clock divisor, parity, stop bits and data size */ 2055 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | 2056 ATMEL_US_PAR | ATMEL_US_USMODE); 2057 2058 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); 2059 2060 /* byte size */ 2061 switch (termios->c_cflag & CSIZE) { 2062 case CS5: 2063 mode |= ATMEL_US_CHRL_5; 2064 break; 2065 case CS6: 2066 mode |= ATMEL_US_CHRL_6; 2067 break; 2068 case CS7: 2069 mode |= ATMEL_US_CHRL_7; 2070 break; 2071 default: 2072 mode |= ATMEL_US_CHRL_8; 2073 break; 2074 } 2075 2076 /* stop bits */ 2077 if (termios->c_cflag & CSTOPB) 2078 mode |= ATMEL_US_NBSTOP_2; 2079 2080 /* parity */ 2081 if (termios->c_cflag & PARENB) { 2082 /* Mark or Space parity */ 2083 if (termios->c_cflag & CMSPAR) { 2084 if (termios->c_cflag & PARODD) 2085 mode |= ATMEL_US_PAR_MARK; 2086 else 2087 mode |= ATMEL_US_PAR_SPACE; 2088 } else if (termios->c_cflag & PARODD) 2089 mode |= ATMEL_US_PAR_ODD; 2090 else 2091 mode |= ATMEL_US_PAR_EVEN; 2092 } else 2093 mode |= ATMEL_US_PAR_NONE; 2094 2095 spin_lock_irqsave(&port->lock, flags); 2096 2097 port->read_status_mask = ATMEL_US_OVRE; 2098 if (termios->c_iflag & INPCK) 2099 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2100 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 2101 port->read_status_mask |= ATMEL_US_RXBRK; 2102 2103 if (atmel_use_pdc_rx(port)) 2104 /* need to enable error interrupts */ 2105 atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask); 2106 2107 /* 2108 * Characters to ignore 2109 */ 2110 port->ignore_status_mask = 0; 2111 if (termios->c_iflag & IGNPAR) 2112 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2113 if (termios->c_iflag & IGNBRK) { 2114 port->ignore_status_mask |= ATMEL_US_RXBRK; 2115 /* 2116 * If we're ignoring parity and break indicators, 2117 * ignore overruns too (for real raw support). 2118 */ 2119 if (termios->c_iflag & IGNPAR) 2120 port->ignore_status_mask |= ATMEL_US_OVRE; 2121 } 2122 /* TODO: Ignore all characters if CREAD is set.*/ 2123 2124 /* update the per-port timeout */ 2125 uart_update_timeout(port, termios->c_cflag, baud); 2126 2127 /* 2128 * save/disable interrupts. The tty layer will ensure that the 2129 * transmitter is empty if requested by the caller, so there's 2130 * no need to wait for it here. 
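 * The saved mask is written back near the end of this function, once the
 * new mode and baud rate have been programmed.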
2131 */ 2132 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2133 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2134 2135 /* disable receiver and transmitter */ 2136 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS); 2137 2138 /* mode */ 2139 if (port->rs485.flags & SER_RS485_ENABLED) { 2140 atmel_uart_writel(port, ATMEL_US_TTGR, 2141 port->rs485.delay_rts_after_send); 2142 mode |= ATMEL_US_USMODE_RS485; 2143 } else if (termios->c_cflag & CRTSCTS) { 2144 /* RS232 with hardware handshake (RTS/CTS) */ 2145 if (atmel_use_fifo(port) && 2146 !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { 2147 /* 2148 * with ATMEL_US_USMODE_HWHS set, the controller will 2149 * be able to drive the RTS pin high/low when the RX 2150 * FIFO is above RXFTHRES/below RXFTHRES2. 2151 * It will also disable the transmitter when the CTS 2152 * pin is high. 2153 * This mode is not activated if the CTS pin is a GPIO 2154 * because in this case, the transmitter is always 2155 * disabled (there must be an internal pull-up 2156 * responsible for this behaviour). 2157 * If the RTS pin is a GPIO, the controller won't be 2158 * able to drive it according to the FIFO thresholds, 2159 * but it will be handled by the driver. 2160 */ 2161 mode |= ATMEL_US_USMODE_HWHS; 2162 } else { 2163 /* 2164 * For platforms without FIFO, the flow control is 2165 * handled by the driver. 2166 */ 2167 mode |= ATMEL_US_USMODE_NORMAL; 2168 } 2169 } else { 2170 /* RS232 without hardware handshake */ 2171 mode |= ATMEL_US_USMODE_NORMAL; 2172 } 2173 2174 /* set the mode, clock divisor, parity, stop bits and data size */ 2175 atmel_uart_writel(port, ATMEL_US_MR, mode); 2176 2177 /* 2178 * when switching the mode, set the RTS line state according to the 2179 * new mode, otherwise keep the former state 2180 */ 2181 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { 2182 unsigned int rts_state; 2183 2184 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 2185 /* let the hardware control the RTS line */ 2186 rts_state = ATMEL_US_RTSDIS; 2187 } else { 2188 /* force RTS line to low level */ 2189 rts_state = ATMEL_US_RTSEN; 2190 } 2191 2192 atmel_uart_writel(port, ATMEL_US_CR, rts_state); 2193 } 2194 2195 /* 2196 * Set the baud rate: 2197 * The fractional baud rate allows the output frequency to be set up 2198 * more accurately. This feature is enabled only when using normal mode.
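 * As a worked example with assumed figures (132 MHz peripheral clock and a
 * requested rate of 115200 Bd; both numbers are purely illustrative), the
 * code below yields div = DIV_ROUND_CLOSEST(132000000, 2 * 115200) = 573,
 * hence CD = 573 >> 3 = 71 and FP = 573 & 7 = 5, i.e. an effective divisor
 * of 71 + 5/8 = 71.625 and an actual rate of roughly 115183 Bd. The
 * relation being solved is: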
2199 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8)) 2200 * Currently, OVER is always set to 0 so we get 2201 * baudrate = selected clock / (16 * (CD + FP / 8)) 2202 * then 2203 * 8 CD + FP = selected clock / (2 * baudrate) 2204 */ 2205 if (atmel_port->has_frac_baudrate && 2206 (mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_NORMAL) { 2207 div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2); 2208 cd = div >> 3; 2209 fp = div & ATMEL_US_FP_MASK; 2210 } else { 2211 cd = uart_get_divisor(port, baud); 2212 } 2213 2214 if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */ 2215 cd /= 8; 2216 mode |= ATMEL_US_USCLKS_MCK_DIV8; 2217 } 2218 quot = cd | fp << ATMEL_US_FP_OFFSET; 2219 2220 atmel_uart_writel(port, ATMEL_US_BRGR, quot); 2221 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2222 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2223 2224 /* restore interrupts */ 2225 atmel_uart_writel(port, ATMEL_US_IER, imr); 2226 2227 /* CTS flow-control and modem-status interrupts */ 2228 if (UART_ENABLE_MS(port, termios->c_cflag)) 2229 atmel_enable_ms(port); 2230 else 2231 atmel_disable_ms(port); 2232 2233 spin_unlock_irqrestore(&port->lock, flags); 2234 } 2235 2236 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios) 2237 { 2238 if (termios->c_line == N_PPS) { 2239 port->flags |= UPF_HARDPPS_CD; 2240 spin_lock_irq(&port->lock); 2241 atmel_enable_ms(port); 2242 spin_unlock_irq(&port->lock); 2243 } else { 2244 port->flags &= ~UPF_HARDPPS_CD; 2245 if (!UART_ENABLE_MS(port, termios->c_cflag)) { 2246 spin_lock_irq(&port->lock); 2247 atmel_disable_ms(port); 2248 spin_unlock_irq(&port->lock); 2249 } 2250 } 2251 } 2252 2253 /* 2254 * Return string describing the specified port 2255 */ 2256 static const char *atmel_type(struct uart_port *port) 2257 { 2258 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL; 2259 } 2260 2261 /* 2262 * Release the memory region(s) being used by 'port'. 2263 */ 2264 static void atmel_release_port(struct uart_port *port) 2265 { 2266 struct platform_device *pdev = to_platform_device(port->dev); 2267 int size = pdev->resource[0].end - pdev->resource[0].start + 1; 2268 2269 release_mem_region(port->mapbase, size); 2270 2271 if (port->flags & UPF_IOREMAP) { 2272 iounmap(port->membase); 2273 port->membase = NULL; 2274 } 2275 } 2276 2277 /* 2278 * Request the memory region(s) being used by 'port'. 2279 */ 2280 static int atmel_request_port(struct uart_port *port) 2281 { 2282 struct platform_device *pdev = to_platform_device(port->dev); 2283 int size = pdev->resource[0].end - pdev->resource[0].start + 1; 2284 2285 if (!request_mem_region(port->mapbase, size, "atmel_serial")) 2286 return -EBUSY; 2287 2288 if (port->flags & UPF_IOREMAP) { 2289 port->membase = ioremap(port->mapbase, size); 2290 if (port->membase == NULL) { 2291 release_mem_region(port->mapbase, size); 2292 return -ENOMEM; 2293 } 2294 } 2295 2296 return 0; 2297 } 2298 2299 /* 2300 * Configure/autoconfigure the port. 2301 */ 2302 static void atmel_config_port(struct uart_port *port, int flags) 2303 { 2304 if (flags & UART_CONFIG_TYPE) { 2305 port->type = PORT_ATMEL; 2306 atmel_request_port(port); 2307 } 2308 } 2309 2310 /* 2311 * Verify the new serial_struct (for TIOCSSERIAL). 
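 * Nothing can actually be reconfigured this way: any value that differs
 * from the current type, irq, I/O method, iomem base, baud base, I/O port
 * or hub6 setting is rejected with -EINVAL.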
2312 */ 2313 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) 2314 { 2315 int ret = 0; 2316 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) 2317 ret = -EINVAL; 2318 if (port->irq != ser->irq) 2319 ret = -EINVAL; 2320 if (ser->io_type != SERIAL_IO_MEM) 2321 ret = -EINVAL; 2322 if (port->uartclk / 16 != ser->baud_base) 2323 ret = -EINVAL; 2324 if (port->mapbase != (unsigned long)ser->iomem_base) 2325 ret = -EINVAL; 2326 if (port->iobase != ser->port) 2327 ret = -EINVAL; 2328 if (ser->hub6 != 0) 2329 ret = -EINVAL; 2330 return ret; 2331 } 2332 2333 #ifdef CONFIG_CONSOLE_POLL 2334 static int atmel_poll_get_char(struct uart_port *port) 2335 { 2336 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY)) 2337 cpu_relax(); 2338 2339 return atmel_uart_read_char(port); 2340 } 2341 2342 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) 2343 { 2344 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2345 cpu_relax(); 2346 2347 atmel_uart_write_char(port, ch); 2348 } 2349 #endif 2350 2351 static const struct uart_ops atmel_pops = { 2352 .tx_empty = atmel_tx_empty, 2353 .set_mctrl = atmel_set_mctrl, 2354 .get_mctrl = atmel_get_mctrl, 2355 .stop_tx = atmel_stop_tx, 2356 .start_tx = atmel_start_tx, 2357 .stop_rx = atmel_stop_rx, 2358 .enable_ms = atmel_enable_ms, 2359 .break_ctl = atmel_break_ctl, 2360 .startup = atmel_startup, 2361 .shutdown = atmel_shutdown, 2362 .flush_buffer = atmel_flush_buffer, 2363 .set_termios = atmel_set_termios, 2364 .set_ldisc = atmel_set_ldisc, 2365 .type = atmel_type, 2366 .release_port = atmel_release_port, 2367 .request_port = atmel_request_port, 2368 .config_port = atmel_config_port, 2369 .verify_port = atmel_verify_port, 2370 .pm = atmel_serial_pm, 2371 #ifdef CONFIG_CONSOLE_POLL 2372 .poll_get_char = atmel_poll_get_char, 2373 .poll_put_char = atmel_poll_put_char, 2374 #endif 2375 }; 2376 2377 /* 2378 * Configure the port from the platform device resource info. 
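 * Unless the console code has already set it up, the "usart" peripheral
 * clock is enabled here only long enough to read its rate; it is left
 * disabled again until the port is opened (see atmel_serial_pm() above).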
2379 */ 2380 static int atmel_init_port(struct atmel_uart_port *atmel_port, 2381 struct platform_device *pdev) 2382 { 2383 int ret; 2384 struct uart_port *port = &atmel_port->uart; 2385 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev); 2386 2387 atmel_init_property(atmel_port, pdev); 2388 atmel_set_ops(port); 2389 2390 atmel_init_rs485(port, pdev); 2391 2392 port->iotype = UPIO_MEM; 2393 port->flags = UPF_BOOT_AUTOCONF; 2394 port->ops = &atmel_pops; 2395 port->fifosize = 1; 2396 port->dev = &pdev->dev; 2397 port->mapbase = pdev->resource[0].start; 2398 port->irq = pdev->resource[1].start; 2399 port->rs485_config = atmel_config_rs485; 2400 2401 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); 2402 2403 if (pdata && pdata->regs) { 2404 /* Already mapped by setup code */ 2405 port->membase = pdata->regs; 2406 } else { 2407 port->flags |= UPF_IOREMAP; 2408 port->membase = NULL; 2409 } 2410 2411 /* for console, the clock could already be configured */ 2412 if (!atmel_port->clk) { 2413 atmel_port->clk = clk_get(&pdev->dev, "usart"); 2414 if (IS_ERR(atmel_port->clk)) { 2415 ret = PTR_ERR(atmel_port->clk); 2416 atmel_port->clk = NULL; 2417 return ret; 2418 } 2419 ret = clk_prepare_enable(atmel_port->clk); 2420 if (ret) { 2421 clk_put(atmel_port->clk); 2422 atmel_port->clk = NULL; 2423 return ret; 2424 } 2425 port->uartclk = clk_get_rate(atmel_port->clk); 2426 clk_disable_unprepare(atmel_port->clk); 2427 /* only enable clock when USART is in use */ 2428 } 2429 2430 /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */ 2431 if (port->rs485.flags & SER_RS485_ENABLED) 2432 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 2433 else if (atmel_use_pdc_tx(port)) { 2434 port->fifosize = PDC_BUFFER_SIZE; 2435 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; 2436 } else { 2437 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 2438 } 2439 2440 return 0; 2441 } 2442 2443 struct platform_device *atmel_default_console_device; /* the serial console device */ 2444 2445 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2446 static void atmel_console_putchar(struct uart_port *port, int ch) 2447 { 2448 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2449 cpu_relax(); 2450 atmel_uart_write_char(port, ch); 2451 } 2452 2453 /* 2454 * Interrupts are disabled on entering 2455 */ 2456 static void atmel_console_write(struct console *co, const char *s, u_int count) 2457 { 2458 struct uart_port *port = &atmel_ports[co->index].uart; 2459 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2460 unsigned int status, imr; 2461 unsigned int pdc_tx; 2462 2463 /* 2464 * First, save IMR and then disable interrupts 2465 */ 2466 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2467 atmel_uart_writel(port, ATMEL_US_IDR, 2468 ATMEL_US_RXRDY | atmel_port->tx_done_mask); 2469 2470 /* Store PDC transmit status and disable it */ 2471 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2472 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2473 2474 uart_console_write(port, s, count, atmel_console_putchar); 2475 2476 /* 2477 * Finally, wait for transmitter to become empty 2478 * and restore IMR 2479 */ 2480 do { 2481 status = atmel_uart_readl(port, ATMEL_US_CSR); 2482 } while (!(status & ATMEL_US_TXRDY)); 2483 2484 /* Restore PDC transmit status */ 2485 if (pdc_tx) 2486 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 2487 2488 /* set interrupts back the way they were */ 2489 atmel_uart_writel(port, ATMEL_US_IER, imr); 2490 } 2491 2492 /* 2493 * If the port was 
already initialised (eg, by a boot loader), 2494 * try to determine the current setup. 2495 */ 2496 static void __init atmel_console_get_options(struct uart_port *port, int *baud, 2497 int *parity, int *bits) 2498 { 2499 unsigned int mr, quot; 2500 2501 /* 2502 * If the baud rate generator isn't running, the port wasn't 2503 * initialized by the boot loader. 2504 */ 2505 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD; 2506 if (!quot) 2507 return; 2508 2509 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; 2510 if (mr == ATMEL_US_CHRL_8) 2511 *bits = 8; 2512 else 2513 *bits = 7; 2514 2515 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; 2516 if (mr == ATMEL_US_PAR_EVEN) 2517 *parity = 'e'; 2518 else if (mr == ATMEL_US_PAR_ODD) 2519 *parity = 'o'; 2520 2521 /* 2522 * The serial core only rounds down when matching this to a 2523 * supported baud rate. Make sure we don't end up slightly 2524 * lower than one of those, as it would make us fall through 2525 * to a much lower baud rate than we really want. 2526 */ 2527 *baud = port->uartclk / (16 * (quot - 1)); 2528 } 2529 2530 static int __init atmel_console_setup(struct console *co, char *options) 2531 { 2532 int ret; 2533 struct uart_port *port = &atmel_ports[co->index].uart; 2534 int baud = 115200; 2535 int bits = 8; 2536 int parity = 'n'; 2537 int flow = 'n'; 2538 2539 if (port->membase == NULL) { 2540 /* Port not initialized yet - delay setup */ 2541 return -ENODEV; 2542 } 2543 2544 ret = clk_prepare_enable(atmel_ports[co->index].clk); 2545 if (ret) 2546 return ret; 2547 2548 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2549 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2550 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2551 2552 if (options) 2553 uart_parse_options(options, &baud, &parity, &bits, &flow); 2554 else 2555 atmel_console_get_options(port, &baud, &parity, &bits); 2556 2557 return uart_set_options(port, co, baud, parity, bits, flow); 2558 } 2559 2560 static struct uart_driver atmel_uart; 2561 2562 static struct console atmel_console = { 2563 .name = ATMEL_DEVICENAME, 2564 .write = atmel_console_write, 2565 .device = uart_console_device, 2566 .setup = atmel_console_setup, 2567 .flags = CON_PRINTBUFFER, 2568 .index = -1, 2569 .data = &atmel_uart, 2570 }; 2571 2572 #define ATMEL_CONSOLE_DEVICE (&atmel_console) 2573 2574 /* 2575 * Early console initialization (before VM subsystem initialized). 2576 */ 2577 static int __init atmel_console_init(void) 2578 { 2579 int ret; 2580 if (atmel_default_console_device) { 2581 struct atmel_uart_data *pdata = 2582 dev_get_platdata(&atmel_default_console_device->dev); 2583 int id = pdata->num; 2584 struct atmel_uart_port *atmel_port = &atmel_ports[id]; 2585 2586 atmel_port->backup_imr = 0; 2587 atmel_port->uart.line = id; 2588 2589 add_preferred_console(ATMEL_DEVICENAME, id, NULL); 2590 ret = atmel_init_port(atmel_port, atmel_default_console_device); 2591 if (ret) 2592 return ret; 2593 register_console(&atmel_console); 2594 } 2595 2596 return 0; 2597 } 2598 2599 console_initcall(atmel_console_init); 2600 2601 /* 2602 * Late console initialization. 
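 * Runs after atmel_console_init(): if the console has not been enabled by
 * then (for instance because atmel_console_setup() had to bail out with
 * -ENODEV), register it again so that it gets another chance.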
2603 */ 2604 static int __init atmel_late_console_init(void) 2605 { 2606 if (atmel_default_console_device 2607 && !(atmel_console.flags & CON_ENABLED)) 2608 register_console(&atmel_console); 2609 2610 return 0; 2611 } 2612 2613 core_initcall(atmel_late_console_init); 2614 2615 static inline bool atmel_is_console_port(struct uart_port *port) 2616 { 2617 return port->cons && port->cons->index == port->line; 2618 } 2619 2620 #else 2621 #define ATMEL_CONSOLE_DEVICE NULL 2622 2623 static inline bool atmel_is_console_port(struct uart_port *port) 2624 { 2625 return false; 2626 } 2627 #endif 2628 2629 static struct uart_driver atmel_uart = { 2630 .owner = THIS_MODULE, 2631 .driver_name = "atmel_serial", 2632 .dev_name = ATMEL_DEVICENAME, 2633 .major = SERIAL_ATMEL_MAJOR, 2634 .minor = MINOR_START, 2635 .nr = ATMEL_MAX_UART, 2636 .cons = ATMEL_CONSOLE_DEVICE, 2637 }; 2638 2639 #ifdef CONFIG_PM 2640 static bool atmel_serial_clk_will_stop(void) 2641 { 2642 #ifdef CONFIG_ARCH_AT91 2643 return at91_suspend_entering_slow_clock(); 2644 #else 2645 return false; 2646 #endif 2647 } 2648 2649 static int atmel_serial_suspend(struct platform_device *pdev, 2650 pm_message_t state) 2651 { 2652 struct uart_port *port = platform_get_drvdata(pdev); 2653 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2654 2655 if (atmel_is_console_port(port) && console_suspend_enabled) { 2656 /* Drain the TX shifter */ 2657 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & 2658 ATMEL_US_TXEMPTY)) 2659 cpu_relax(); 2660 } 2661 2662 /* we can not wake up if we're running on slow clock */ 2663 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); 2664 if (atmel_serial_clk_will_stop()) { 2665 unsigned long flags; 2666 2667 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2668 atmel_port->suspended = true; 2669 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2670 device_set_wakeup_enable(&pdev->dev, 0); 2671 } 2672 2673 uart_suspend_port(&atmel_uart, port); 2674 2675 return 0; 2676 } 2677 2678 static int atmel_serial_resume(struct platform_device *pdev) 2679 { 2680 struct uart_port *port = platform_get_drvdata(pdev); 2681 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2682 unsigned long flags; 2683 2684 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2685 if (atmel_port->pending) { 2686 atmel_handle_receive(port, atmel_port->pending); 2687 atmel_handle_status(port, atmel_port->pending, 2688 atmel_port->pending_status); 2689 atmel_handle_transmit(port, atmel_port->pending); 2690 atmel_port->pending = 0; 2691 } 2692 atmel_port->suspended = false; 2693 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2694 2695 uart_resume_port(&atmel_uart, port); 2696 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); 2697 2698 return 0; 2699 } 2700 #else 2701 #define atmel_serial_suspend NULL 2702 #define atmel_serial_resume NULL 2703 #endif 2704 2705 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port, 2706 struct platform_device *pdev) 2707 { 2708 atmel_port->fifo_size = 0; 2709 atmel_port->rts_low = 0; 2710 atmel_port->rts_high = 0; 2711 2712 if (of_property_read_u32(pdev->dev.of_node, 2713 "atmel,fifo-size", 2714 &atmel_port->fifo_size)) 2715 return; 2716 2717 if (!atmel_port->fifo_size) 2718 return; 2719 2720 if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) { 2721 atmel_port->fifo_size = 0; 2722 dev_err(&pdev->dev, "Invalid FIFO size\n"); 2723 return; 2724 } 2725 2726 /* 2727 * 0 <= rts_low <= rts_high <= fifo_size 2728 * Once their CTS line asserted by 
the remote peer, some x86 UARTs tend 2729 * to flush their internal TX FIFO, commonly up to 16 data, before 2730 * actually stopping to send new data. So we try to set the RTS High 2731 * Threshold to a reasonably high value respecting this 16 data 2732 * empirical rule when possible. 2733 */ 2734 atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1, 2735 atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET); 2736 atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2, 2737 atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET); 2738 2739 dev_info(&pdev->dev, "Using FIFO (%u data)\n", 2740 atmel_port->fifo_size); 2741 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n", 2742 atmel_port->rts_high); 2743 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n", 2744 atmel_port->rts_low); 2745 } 2746 2747 static int atmel_serial_probe(struct platform_device *pdev) 2748 { 2749 struct atmel_uart_port *atmel_port; 2750 struct device_node *np = pdev->dev.of_node; 2751 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev); 2752 void *data; 2753 int ret = -ENODEV; 2754 bool rs485_enabled; 2755 2756 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); 2757 2758 if (np) 2759 ret = of_alias_get_id(np, "serial"); 2760 else 2761 if (pdata) 2762 ret = pdata->num; 2763 2764 if (ret < 0) 2765 /* port id not found in platform data nor device-tree aliases: 2766 * auto-enumerate it */ 2767 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); 2768 2769 if (ret >= ATMEL_MAX_UART) { 2770 ret = -ENODEV; 2771 goto err; 2772 } 2773 2774 if (test_and_set_bit(ret, atmel_ports_in_use)) { 2775 /* port already in use */ 2776 ret = -EBUSY; 2777 goto err; 2778 } 2779 2780 atmel_port = &atmel_ports[ret]; 2781 atmel_port->backup_imr = 0; 2782 atmel_port->uart.line = ret; 2783 atmel_serial_probe_fifos(atmel_port, pdev); 2784 2785 atomic_set(&atmel_port->tasklet_shutdown, 0); 2786 spin_lock_init(&atmel_port->lock_suspended); 2787 2788 ret = atmel_init_port(atmel_port, pdev); 2789 if (ret) 2790 goto err_clear_bit; 2791 2792 atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0); 2793 if (IS_ERR(atmel_port->gpios)) { 2794 ret = PTR_ERR(atmel_port->gpios); 2795 goto err_clear_bit; 2796 } 2797 2798 if (!atmel_use_pdc_rx(&atmel_port->uart)) { 2799 ret = -ENOMEM; 2800 data = kmalloc(sizeof(struct atmel_uart_char) 2801 * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL); 2802 if (!data) 2803 goto err_alloc_ring; 2804 atmel_port->rx_ring.buf = data; 2805 } 2806 2807 rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED; 2808 2809 ret = uart_add_one_port(&atmel_uart, &atmel_port->uart); 2810 if (ret) 2811 goto err_add_port; 2812 2813 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2814 if (atmel_is_console_port(&atmel_port->uart) 2815 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { 2816 /* 2817 * The serial core enabled the clock for us, so undo 2818 * the clk_prepare_enable() in atmel_console_setup() 2819 */ 2820 clk_disable_unprepare(atmel_port->clk); 2821 } 2822 #endif 2823 2824 device_init_wakeup(&pdev->dev, 1); 2825 platform_set_drvdata(pdev, atmel_port); 2826 2827 /* 2828 * The peripheral clock has been disabled by atmel_init_port(): 2829 * enable it before accessing I/O registers 2830 */ 2831 clk_prepare_enable(atmel_port->clk); 2832 2833 if (rs485_enabled) { 2834 atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR, 2835 ATMEL_US_USMODE_NORMAL); 2836 atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR, 2837 ATMEL_US_RTSEN); 2838 } 2839 2840 /* 2841 * Get port name of usart or uart 2842 */ 2843 
atmel_get_ip_name(&atmel_port->uart); 2844 2845 /* 2846 * The peripheral clock can now safely be disabled till the port 2847 * is used 2848 */ 2849 clk_disable_unprepare(atmel_port->clk); 2850 2851 return 0; 2852 2853 err_add_port: 2854 kfree(atmel_port->rx_ring.buf); 2855 atmel_port->rx_ring.buf = NULL; 2856 err_alloc_ring: 2857 if (!atmel_is_console_port(&atmel_port->uart)) { 2858 clk_put(atmel_port->clk); 2859 atmel_port->clk = NULL; 2860 } 2861 err_clear_bit: 2862 clear_bit(atmel_port->uart.line, atmel_ports_in_use); 2863 err: 2864 return ret; 2865 } 2866 2867 /* 2868 * Even if the driver is not modular, it makes sense to be able to 2869 * unbind a device: there can be many bound devices, and there are 2870 * situations where dynamic binding and unbinding can be useful. 2871 * 2872 * For example, a connected device can require a specific firmware update 2873 * protocol that needs bitbanging on IO lines, but use the regular serial 2874 * port in the normal case. 2875 */ 2876 static int atmel_serial_remove(struct platform_device *pdev) 2877 { 2878 struct uart_port *port = platform_get_drvdata(pdev); 2879 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2880 int ret = 0; 2881 2882 tasklet_kill(&atmel_port->tasklet_rx); 2883 tasklet_kill(&atmel_port->tasklet_tx); 2884 2885 device_init_wakeup(&pdev->dev, 0); 2886 2887 ret = uart_remove_one_port(&atmel_uart, port); 2888 2889 kfree(atmel_port->rx_ring.buf); 2890 2891 /* "port" is allocated statically, so we shouldn't free it */ 2892 2893 clear_bit(port->line, atmel_ports_in_use); 2894 2895 clk_put(atmel_port->clk); 2896 atmel_port->clk = NULL; 2897 2898 return ret; 2899 } 2900 2901 static struct platform_driver atmel_serial_driver = { 2902 .probe = atmel_serial_probe, 2903 .remove = atmel_serial_remove, 2904 .suspend = atmel_serial_suspend, 2905 .resume = atmel_serial_resume, 2906 .driver = { 2907 .name = "atmel_usart", 2908 .of_match_table = of_match_ptr(atmel_serial_dt_ids), 2909 }, 2910 }; 2911 2912 static int __init atmel_serial_init(void) 2913 { 2914 int ret; 2915 2916 ret = uart_register_driver(&atmel_uart); 2917 if (ret) 2918 return ret; 2919 2920 ret = platform_driver_register(&atmel_serial_driver); 2921 if (ret) 2922 uart_unregister_driver(&atmel_uart); 2923 2924 return ret; 2925 } 2926 device_initcall(atmel_serial_init); 2927
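/*
 * For orientation, the probe path above expects a device tree node roughly
 * like the sketch below (node name, addresses, interrupt specifier and FIFO
 * size are illustrative only; the binding documentation is authoritative):
 *
 *	aliases {
 *		serial1 = &usart1;
 *	};
 *
 *	usart1: serial@f8020000 {
 *		compatible = "atmel,at91sam9260-usart";
 *		reg = <0xf8020000 0x100>;
 *		interrupts = <13 IRQ_TYPE_LEVEL_HIGH 5>;
 *		clocks = <&usart1_clk>;
 *		clock-names = "usart";
 *		atmel,fifo-size = <32>;
 *	};
 *
 * of_alias_get_id() turns the "serialN" alias into the port number,
 * atmel_init_port() takes the register window and interrupt from resources
 * 0 and 1, clk_get(dev, "usart") fetches the peripheral clock, and
 * atmel_serial_probe_fifos() reads the optional "atmel,fifo-size" property.
 */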