// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Atmel AT91 Serial ports
 * Copyright (C) 2003 Rick Bronson
 *
 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * DMA support added by Chip Coldwell.
 */
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/mm.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/ioctls.h>

#define PDC_BUFFER_SIZE		512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT		(3 * 10)	/* 3 bytes */

/* The minimum number of data the FIFOs should be able to contain */
#define ATMEL_MIN_FIFO_SIZE	8
/*
 * These two offsets are subtracted from the RX FIFO size to define the RTS
 * high and low thresholds
 */
#define ATMEL_RTS_HIGH_OFFSET	16
#define ATMEL_RTS_LOW_OFFSET	20

#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/serial_core.h>

#include "serial_mctrl_gpio.h"
#include "atmel_serial.h"

static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);

#ifdef CONFIG_SERIAL_ATMEL_TTYAT

/* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
 * should coexist with the 8250 driver, such as if we have an external 16C550
 * UART. */
#define SERIAL_ATMEL_MAJOR	204
#define MINOR_START		154
#define ATMEL_DEVICENAME	"ttyAT"

#else

/* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
 * name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR	TTY_MAJOR
#define MINOR_START		64
#define ATMEL_DEVICENAME	"ttyS"

#endif

#define ATMEL_ISR_PASS_LIMIT	256

struct atmel_dma_buffer {
	unsigned char	*buf;
	dma_addr_t	dma_addr;
	unsigned int	dma_size;
	unsigned int	ofs;
};

struct atmel_uart_char {
	u16		status;
	u16		ch;
};

/*
 * Be careful, the real size of the ring buffer is
 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
 * can contain up to 1024 characters in PIO mode and up to 4096 characters in
 * DMA mode.
 */
#define ATMEL_SERIAL_RINGSIZE 1024

/*
 * at91: 6 USARTs and one DBGU port (SAM9260)
 * samx7: 3 USARTs and 5 UARTs
 */
#define ATMEL_MAX_UART		8

/*
 * We wrap our port structure around the generic uart_port.
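 * to_atmel_uart_port() converts a struct uart_port back to this wrapper
 * using container_of().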
 */
struct atmel_uart_port {
	struct uart_port	uart;		/* uart */
	struct clk		*clk;		/* uart clock */
	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
	u32			backup_imr;	/* IMR saved during suspend */
	int			break_active;	/* break being received */

	bool			use_dma_rx;	/* enable DMA receiver */
	bool			use_pdc_rx;	/* enable PDC receiver */
	short			pdc_rx_idx;	/* current PDC RX buffer */
	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */

	bool			use_dma_tx;	/* enable DMA transmitter */
	bool			use_pdc_tx;	/* enable PDC transmitter */
	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */

	spinlock_t		lock_tx;	/* port lock */
	spinlock_t		lock_rx;	/* port lock */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	dma_cookie_t		cookie_tx;
	dma_cookie_t		cookie_rx;
	struct scatterlist	sg_tx;
	struct scatterlist	sg_rx;
	struct tasklet_struct	tasklet_rx;
	struct tasklet_struct	tasklet_tx;
	atomic_t		tasklet_shutdown;
	unsigned int		irq_status_prev;
	unsigned int		tx_len;

	struct circ_buf		rx_ring;

	struct mctrl_gpios	*gpios;
	u32			backup_mode;	/* MR saved during iso7816 operations */
	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
	unsigned int		tx_done_mask;
	u32			fifo_size;
	u32			rts_high;
	u32			rts_low;
	bool			ms_irq_enabled;
	u32			rtor;	/* address of receiver timeout register if it exists */
	bool			has_frac_baudrate;
	bool			has_hw_timer;
	struct timer_list	uart_timer;

	bool			tx_stopped;
	bool			suspended;
	unsigned int		pending;
	unsigned int		pending_status;
	spinlock_t		lock_suspended;

	bool			hd_start_rx;	/* can start RX during half-duplex operation */

	/* ISO7816 */
	unsigned int		fidi_min;
	unsigned int		fidi_max;

#ifdef CONFIG_PM
	struct {
		u32		cr;
		u32		mr;
		u32		imr;
		u32		brgr;
		u32		rtor;
		u32		ttgr;
		u32		fmr;
		u32		fimr;
	} cache;
#endif

	int (*prepare_rx)(struct uart_port *port);
	int (*prepare_tx)(struct uart_port *port);
	void (*schedule_rx)(struct uart_port *port);
	void (*schedule_tx)(struct uart_port *port);
	void (*release_rx)(struct uart_port *port);
	void (*release_tx)(struct uart_port *port);
};

static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);

#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif

#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-usart-serial" },
	{ /* sentinel */ }
};
#endif

static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}

static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}

static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}

static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}

static inline
int atmel_uart_is_half_duplex(struct uart_port *port) 237 { 238 return ((port->rs485.flags & SER_RS485_ENABLED) && 239 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || 240 (port->iso7816.flags & SER_ISO7816_ENABLED); 241 } 242 243 #ifdef CONFIG_SERIAL_ATMEL_PDC 244 static bool atmel_use_pdc_rx(struct uart_port *port) 245 { 246 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 247 248 return atmel_port->use_pdc_rx; 249 } 250 251 static bool atmel_use_pdc_tx(struct uart_port *port) 252 { 253 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 254 255 return atmel_port->use_pdc_tx; 256 } 257 #else 258 static bool atmel_use_pdc_rx(struct uart_port *port) 259 { 260 return false; 261 } 262 263 static bool atmel_use_pdc_tx(struct uart_port *port) 264 { 265 return false; 266 } 267 #endif 268 269 static bool atmel_use_dma_tx(struct uart_port *port) 270 { 271 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 272 273 return atmel_port->use_dma_tx; 274 } 275 276 static bool atmel_use_dma_rx(struct uart_port *port) 277 { 278 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 279 280 return atmel_port->use_dma_rx; 281 } 282 283 static bool atmel_use_fifo(struct uart_port *port) 284 { 285 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 286 287 return atmel_port->fifo_size; 288 } 289 290 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port, 291 struct tasklet_struct *t) 292 { 293 if (!atomic_read(&atmel_port->tasklet_shutdown)) 294 tasklet_schedule(t); 295 } 296 297 static unsigned int atmel_get_lines_status(struct uart_port *port) 298 { 299 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 300 unsigned int status, ret = 0; 301 302 status = atmel_uart_readl(port, ATMEL_US_CSR); 303 304 mctrl_gpio_get(atmel_port->gpios, &ret); 305 306 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 307 UART_GPIO_CTS))) { 308 if (ret & TIOCM_CTS) 309 status &= ~ATMEL_US_CTS; 310 else 311 status |= ATMEL_US_CTS; 312 } 313 314 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 315 UART_GPIO_DSR))) { 316 if (ret & TIOCM_DSR) 317 status &= ~ATMEL_US_DSR; 318 else 319 status |= ATMEL_US_DSR; 320 } 321 322 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 323 UART_GPIO_RI))) { 324 if (ret & TIOCM_RI) 325 status &= ~ATMEL_US_RI; 326 else 327 status |= ATMEL_US_RI; 328 } 329 330 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, 331 UART_GPIO_DCD))) { 332 if (ret & TIOCM_CD) 333 status &= ~ATMEL_US_DCD; 334 else 335 status |= ATMEL_US_DCD; 336 } 337 338 return status; 339 } 340 341 /* Enable or disable the rs485 support */ 342 static int atmel_config_rs485(struct uart_port *port, 343 struct serial_rs485 *rs485conf) 344 { 345 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 346 unsigned int mode; 347 348 /* Disable interrupts */ 349 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 350 351 mode = atmel_uart_readl(port, ATMEL_US_MR); 352 353 /* Resetting serial mode to RS232 (0x0) */ 354 mode &= ~ATMEL_US_USMODE; 355 356 port->rs485 = *rs485conf; 357 358 if (rs485conf->flags & SER_RS485_ENABLED) { 359 dev_dbg(port->dev, "Setting UART to RS485\n"); 360 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 361 atmel_uart_writel(port, ATMEL_US_TTGR, 362 rs485conf->delay_rts_after_send); 363 mode |= ATMEL_US_USMODE_RS485; 364 } else { 365 dev_dbg(port->dev, "Setting UART to RS232\n"); 366 if (atmel_use_pdc_tx(port)) 367 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 368 ATMEL_US_TXBUFE; 369 
else 370 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 371 } 372 atmel_uart_writel(port, ATMEL_US_MR, mode); 373 374 /* Enable interrupts */ 375 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 376 377 return 0; 378 } 379 380 static unsigned int atmel_calc_cd(struct uart_port *port, 381 struct serial_iso7816 *iso7816conf) 382 { 383 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 384 unsigned int cd; 385 u64 mck_rate; 386 387 mck_rate = (u64)clk_get_rate(atmel_port->clk); 388 do_div(mck_rate, iso7816conf->clk); 389 cd = mck_rate; 390 return cd; 391 } 392 393 static unsigned int atmel_calc_fidi(struct uart_port *port, 394 struct serial_iso7816 *iso7816conf) 395 { 396 u64 fidi = 0; 397 398 if (iso7816conf->sc_fi && iso7816conf->sc_di) { 399 fidi = (u64)iso7816conf->sc_fi; 400 do_div(fidi, iso7816conf->sc_di); 401 } 402 return (u32)fidi; 403 } 404 405 /* Enable or disable the iso7816 support */ 406 /* Called with interrupts disabled */ 407 static int atmel_config_iso7816(struct uart_port *port, 408 struct serial_iso7816 *iso7816conf) 409 { 410 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 411 unsigned int mode; 412 unsigned int cd, fidi; 413 int ret = 0; 414 415 /* Disable interrupts */ 416 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 417 418 mode = atmel_uart_readl(port, ATMEL_US_MR); 419 420 if (iso7816conf->flags & SER_ISO7816_ENABLED) { 421 mode &= ~ATMEL_US_USMODE; 422 423 if (iso7816conf->tg > 255) { 424 dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n"); 425 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 426 ret = -EINVAL; 427 goto err_out; 428 } 429 430 if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 431 == SER_ISO7816_T(0)) { 432 mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK; 433 } else if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 434 == SER_ISO7816_T(1)) { 435 mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK; 436 } else { 437 dev_err(port->dev, "ISO7816: Type not supported\n"); 438 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 439 ret = -EINVAL; 440 goto err_out; 441 } 442 443 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR); 444 445 /* select mck clock, and output */ 446 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 447 /* set parity for normal/inverse mode + max iterations */ 448 mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3); 449 450 cd = atmel_calc_cd(port, iso7816conf); 451 fidi = atmel_calc_fidi(port, iso7816conf); 452 if (fidi == 0) { 453 dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n"); 454 } else if (fidi < atmel_port->fidi_min 455 || fidi > atmel_port->fidi_max) { 456 dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi); 457 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 458 ret = -EINVAL; 459 goto err_out; 460 } 461 462 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) { 463 /* port not yet in iso7816 mode: store configuration */ 464 atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR); 465 atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 466 } 467 468 atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg); 469 atmel_uart_writel(port, ATMEL_US_BRGR, cd); 470 atmel_uart_writel(port, ATMEL_US_FIDI, fidi); 471 472 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN); 473 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION; 474 } else { 475 dev_dbg(port->dev, "Setting UART back to RS232\n"); 476 /* back to last RS232 settings */ 
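		/* MR and BRGR were saved when ISO7816 mode was first enabled above */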
477 mode = atmel_port->backup_mode; 478 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 479 atmel_uart_writel(port, ATMEL_US_TTGR, 0); 480 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr); 481 atmel_uart_writel(port, ATMEL_US_FIDI, 0x174); 482 483 if (atmel_use_pdc_tx(port)) 484 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 485 ATMEL_US_TXBUFE; 486 else 487 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 488 } 489 490 port->iso7816 = *iso7816conf; 491 492 atmel_uart_writel(port, ATMEL_US_MR, mode); 493 494 err_out: 495 /* Enable interrupts */ 496 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 497 498 return ret; 499 } 500 501 /* 502 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. 503 */ 504 static u_int atmel_tx_empty(struct uart_port *port) 505 { 506 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 507 508 if (atmel_port->tx_stopped) 509 return TIOCSER_TEMT; 510 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ? 511 TIOCSER_TEMT : 512 0; 513 } 514 515 /* 516 * Set state of the modem control output lines 517 */ 518 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) 519 { 520 unsigned int control = 0; 521 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR); 522 unsigned int rts_paused, rts_ready; 523 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 524 525 /* override mode to RS485 if needed, otherwise keep the current mode */ 526 if (port->rs485.flags & SER_RS485_ENABLED) { 527 atmel_uart_writel(port, ATMEL_US_TTGR, 528 port->rs485.delay_rts_after_send); 529 mode &= ~ATMEL_US_USMODE; 530 mode |= ATMEL_US_USMODE_RS485; 531 } 532 533 /* set the RTS line state according to the mode */ 534 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 535 /* force RTS line to high level */ 536 rts_paused = ATMEL_US_RTSEN; 537 538 /* give the control of the RTS line back to the hardware */ 539 rts_ready = ATMEL_US_RTSDIS; 540 } else { 541 /* force RTS line to high level */ 542 rts_paused = ATMEL_US_RTSDIS; 543 544 /* force RTS line to low level */ 545 rts_ready = ATMEL_US_RTSEN; 546 } 547 548 if (mctrl & TIOCM_RTS) 549 control |= rts_ready; 550 else 551 control |= rts_paused; 552 553 if (mctrl & TIOCM_DTR) 554 control |= ATMEL_US_DTREN; 555 else 556 control |= ATMEL_US_DTRDIS; 557 558 atmel_uart_writel(port, ATMEL_US_CR, control); 559 560 mctrl_gpio_set(atmel_port->gpios, mctrl); 561 562 /* Local loopback mode? */ 563 mode &= ~ATMEL_US_CHMODE; 564 if (mctrl & TIOCM_LOOP) 565 mode |= ATMEL_US_CHMODE_LOC_LOOP; 566 else 567 mode |= ATMEL_US_CHMODE_NORMAL; 568 569 atmel_uart_writel(port, ATMEL_US_MR, mode); 570 } 571 572 /* 573 * Get state of the modem control input lines 574 */ 575 static u_int atmel_get_mctrl(struct uart_port *port) 576 { 577 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 578 unsigned int ret = 0, status; 579 580 status = atmel_uart_readl(port, ATMEL_US_CSR); 581 582 /* 583 * The control signals are active low. 584 */ 585 if (!(status & ATMEL_US_DCD)) 586 ret |= TIOCM_CD; 587 if (!(status & ATMEL_US_CTS)) 588 ret |= TIOCM_CTS; 589 if (!(status & ATMEL_US_DSR)) 590 ret |= TIOCM_DSR; 591 if (!(status & ATMEL_US_RI)) 592 ret |= TIOCM_RI; 593 594 return mctrl_gpio_get(atmel_port->gpios, &ret); 595 } 596 597 /* 598 * Stop transmitting. 
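 * In half-duplex operation (RS485 without receiving during TX, or ISO7816)
 * the receiver is re-enabled once the transmitter has been stopped.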
599 */ 600 static void atmel_stop_tx(struct uart_port *port) 601 { 602 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 603 604 if (atmel_use_pdc_tx(port)) { 605 /* disable PDC transmit */ 606 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 607 } 608 609 /* 610 * Disable the transmitter. 611 * This is mandatory when DMA is used, otherwise the DMA buffer 612 * is fully transmitted. 613 */ 614 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); 615 atmel_port->tx_stopped = true; 616 617 /* Disable interrupts */ 618 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 619 620 if (atmel_uart_is_half_duplex(port)) 621 atmel_start_rx(port); 622 623 } 624 625 /* 626 * Start transmitting. 627 */ 628 static void atmel_start_tx(struct uart_port *port) 629 { 630 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 631 632 if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) 633 & ATMEL_PDC_TXTEN)) 634 /* The transmitter is already running. Yes, we 635 really need this.*/ 636 return; 637 638 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) 639 if (atmel_uart_is_half_duplex(port)) 640 atmel_stop_rx(port); 641 642 if (atmel_use_pdc_tx(port)) 643 /* re-enable PDC transmit */ 644 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 645 646 /* Enable interrupts */ 647 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 648 649 /* re-enable the transmitter */ 650 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 651 atmel_port->tx_stopped = false; 652 } 653 654 /* 655 * start receiving - port is in process of being opened. 656 */ 657 static void atmel_start_rx(struct uart_port *port) 658 { 659 /* reset status and receiver */ 660 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 661 662 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN); 663 664 if (atmel_use_pdc_rx(port)) { 665 /* enable PDC controller */ 666 atmel_uart_writel(port, ATMEL_US_IER, 667 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 668 port->read_status_mask); 669 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 670 } else { 671 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 672 } 673 } 674 675 /* 676 * Stop receiving - port is in process of being closed. 
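 * The RX interrupts used by the active mode (PDC ENDRX/TIMEOUT or RXRDY)
 * are masked as well.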
677 */ 678 static void atmel_stop_rx(struct uart_port *port) 679 { 680 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS); 681 682 if (atmel_use_pdc_rx(port)) { 683 /* disable PDC receive */ 684 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS); 685 atmel_uart_writel(port, ATMEL_US_IDR, 686 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 687 port->read_status_mask); 688 } else { 689 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY); 690 } 691 } 692 693 /* 694 * Enable modem status interrupts 695 */ 696 static void atmel_enable_ms(struct uart_port *port) 697 { 698 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 699 uint32_t ier = 0; 700 701 /* 702 * Interrupt should not be enabled twice 703 */ 704 if (atmel_port->ms_irq_enabled) 705 return; 706 707 atmel_port->ms_irq_enabled = true; 708 709 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 710 ier |= ATMEL_US_CTSIC; 711 712 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 713 ier |= ATMEL_US_DSRIC; 714 715 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 716 ier |= ATMEL_US_RIIC; 717 718 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 719 ier |= ATMEL_US_DCDIC; 720 721 atmel_uart_writel(port, ATMEL_US_IER, ier); 722 723 mctrl_gpio_enable_ms(atmel_port->gpios); 724 } 725 726 /* 727 * Disable modem status interrupts 728 */ 729 static void atmel_disable_ms(struct uart_port *port) 730 { 731 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 732 uint32_t idr = 0; 733 734 /* 735 * Interrupt should not be disabled twice 736 */ 737 if (!atmel_port->ms_irq_enabled) 738 return; 739 740 atmel_port->ms_irq_enabled = false; 741 742 mctrl_gpio_disable_ms(atmel_port->gpios); 743 744 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 745 idr |= ATMEL_US_CTSIC; 746 747 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 748 idr |= ATMEL_US_DSRIC; 749 750 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 751 idr |= ATMEL_US_RIIC; 752 753 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 754 idr |= ATMEL_US_DCDIC; 755 756 atmel_uart_writel(port, ATMEL_US_IDR, idr); 757 } 758 759 /* 760 * Control the transmission of a break signal 761 */ 762 static void atmel_break_ctl(struct uart_port *port, int break_state) 763 { 764 if (break_state != 0) 765 /* start break */ 766 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK); 767 else 768 /* stop break */ 769 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK); 770 } 771 772 /* 773 * Stores the incoming character in the ring buffer 774 */ 775 static void 776 atmel_buffer_rx_char(struct uart_port *port, unsigned int status, 777 unsigned int ch) 778 { 779 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 780 struct circ_buf *ring = &atmel_port->rx_ring; 781 struct atmel_uart_char *c; 782 783 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) 784 /* Buffer overflow, ignore char */ 785 return; 786 787 c = &((struct atmel_uart_char *)ring->buf)[ring->head]; 788 c->status = status; 789 c->ch = ch; 790 791 /* Make sure the character is stored before we update head. */ 792 smp_wmb(); 793 794 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 795 } 796 797 /* 798 * Deal with parity, framing and overrun errors. 
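 * Only the error counters are updated here; the data received through the
 * PDC is pushed to the tty layer unflagged (see the TODO in
 * atmel_handle_receive()).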
799 */ 800 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) 801 { 802 /* clear error */ 803 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 804 805 if (status & ATMEL_US_RXBRK) { 806 /* ignore side-effect */ 807 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 808 port->icount.brk++; 809 } 810 if (status & ATMEL_US_PARE) 811 port->icount.parity++; 812 if (status & ATMEL_US_FRAME) 813 port->icount.frame++; 814 if (status & ATMEL_US_OVRE) 815 port->icount.overrun++; 816 } 817 818 /* 819 * Characters received (called from interrupt handler) 820 */ 821 static void atmel_rx_chars(struct uart_port *port) 822 { 823 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 824 unsigned int status, ch; 825 826 status = atmel_uart_readl(port, ATMEL_US_CSR); 827 while (status & ATMEL_US_RXRDY) { 828 ch = atmel_uart_read_char(port); 829 830 /* 831 * note that the error handling code is 832 * out of the main execution path 833 */ 834 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 835 | ATMEL_US_OVRE | ATMEL_US_RXBRK) 836 || atmel_port->break_active)) { 837 838 /* clear error */ 839 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 840 841 if (status & ATMEL_US_RXBRK 842 && !atmel_port->break_active) { 843 atmel_port->break_active = 1; 844 atmel_uart_writel(port, ATMEL_US_IER, 845 ATMEL_US_RXBRK); 846 } else { 847 /* 848 * This is either the end-of-break 849 * condition or we've received at 850 * least one character without RXBRK 851 * being set. In both cases, the next 852 * RXBRK will indicate start-of-break. 853 */ 854 atmel_uart_writel(port, ATMEL_US_IDR, 855 ATMEL_US_RXBRK); 856 status &= ~ATMEL_US_RXBRK; 857 atmel_port->break_active = 0; 858 } 859 } 860 861 atmel_buffer_rx_char(port, status, ch); 862 status = atmel_uart_readl(port, ATMEL_US_CSR); 863 } 864 865 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 866 } 867 868 /* 869 * Transmit characters (called from tasklet with TXRDY interrupt 870 * disabled) 871 */ 872 static void atmel_tx_chars(struct uart_port *port) 873 { 874 struct circ_buf *xmit = &port->state->xmit; 875 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 876 877 if (port->x_char && 878 (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) { 879 atmel_uart_write_char(port, port->x_char); 880 port->icount.tx++; 881 port->x_char = 0; 882 } 883 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) 884 return; 885 886 while (atmel_uart_readl(port, ATMEL_US_CSR) & 887 atmel_port->tx_done_mask) { 888 atmel_uart_write_char(port, xmit->buf[xmit->tail]); 889 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 890 port->icount.tx++; 891 if (uart_circ_empty(xmit)) 892 break; 893 } 894 895 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 896 uart_write_wakeup(port); 897 898 if (!uart_circ_empty(xmit)) 899 /* Enable interrupts */ 900 atmel_uart_writel(port, ATMEL_US_IER, 901 atmel_port->tx_done_mask); 902 } 903 904 static void atmel_complete_tx_dma(void *arg) 905 { 906 struct atmel_uart_port *atmel_port = arg; 907 struct uart_port *port = &atmel_port->uart; 908 struct circ_buf *xmit = &port->state->xmit; 909 struct dma_chan *chan = atmel_port->chan_tx; 910 unsigned long flags; 911 912 spin_lock_irqsave(&port->lock, flags); 913 914 if (chan) 915 dmaengine_terminate_all(chan); 916 xmit->tail += atmel_port->tx_len; 917 xmit->tail &= UART_XMIT_SIZE - 1; 918 919 port->icount.tx += atmel_port->tx_len; 920 921 spin_lock_irq(&atmel_port->lock_tx); 922 async_tx_ack(atmel_port->desc_tx); 923 atmel_port->cookie_tx = 
-EINVAL;
	atmel_port->desc_tx = NULL;
	spin_unlock_irq(&atmel_port->lock_tx);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	/*
	 * xmit is a circular buffer so, if we have just sent data from
	 * xmit->tail to the end of xmit->buf, now we have to transmit the
	 * remaining data from the beginning of xmit->buf to xmit->head.
	 */
	if (!uart_circ_empty(xmit))
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
	else if (atmel_uart_is_half_duplex(port)) {
		/*
		 * DMA done, re-enable TXEMPTY and signal that we can stop
		 * TX and start RX for RS485
		 */
		atmel_port->hd_start_rx = true;
		atmel_uart_writel(port, ATMEL_US_IER,
				  atmel_port->tx_done_mask);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}

static void atmel_release_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_tx;

	if (chan) {
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
			     DMA_TO_DEVICE);
	}

	atmel_port->desc_tx = NULL;
	atmel_port->chan_tx = NULL;
	atmel_port->cookie_tx = -EINVAL;
}

/*
 * Called from tasklet with TXRDY interrupt disabled.
 */
static void atmel_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_chan *chan = atmel_port->chan_tx;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
	unsigned int tx_len, part1_len, part2_len, sg_len;
	dma_addr_t phys_addr;

	/* Make sure we have an idle channel */
	if (atmel_port->desc_tx != NULL)
		return;

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		/*
		 * DMA is idle now.
		 * Port xmit buffer is already mapped,
		 * and it is one page... Just adjust
		 * offsets and lengths. Since it is a circular buffer,
		 * we have to transmit till the end, and then the rest.
		 * Take the port lock to get a
		 * consistent xmit buffer state.
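		 * (The buffer is PAGE_ALIGNED and was mapped once for DMA in
		 * atmel_prepare_tx_dma().)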
993 */ 994 tx_len = CIRC_CNT_TO_END(xmit->head, 995 xmit->tail, 996 UART_XMIT_SIZE); 997 998 if (atmel_port->fifo_size) { 999 /* multi data mode */ 1000 part1_len = (tx_len & ~0x3); /* DWORD access */ 1001 part2_len = (tx_len & 0x3); /* BYTE access */ 1002 } else { 1003 /* single data (legacy) mode */ 1004 part1_len = 0; 1005 part2_len = tx_len; /* BYTE access only */ 1006 } 1007 1008 sg_init_table(sgl, 2); 1009 sg_len = 0; 1010 phys_addr = sg_dma_address(sg_tx) + xmit->tail; 1011 if (part1_len) { 1012 sg = &sgl[sg_len++]; 1013 sg_dma_address(sg) = phys_addr; 1014 sg_dma_len(sg) = part1_len; 1015 1016 phys_addr += part1_len; 1017 } 1018 1019 if (part2_len) { 1020 sg = &sgl[sg_len++]; 1021 sg_dma_address(sg) = phys_addr; 1022 sg_dma_len(sg) = part2_len; 1023 } 1024 1025 /* 1026 * save tx_len so atmel_complete_tx_dma() will increase 1027 * xmit->tail correctly 1028 */ 1029 atmel_port->tx_len = tx_len; 1030 1031 desc = dmaengine_prep_slave_sg(chan, 1032 sgl, 1033 sg_len, 1034 DMA_MEM_TO_DEV, 1035 DMA_PREP_INTERRUPT | 1036 DMA_CTRL_ACK); 1037 if (!desc) { 1038 dev_err(port->dev, "Failed to send via dma!\n"); 1039 return; 1040 } 1041 1042 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE); 1043 1044 atmel_port->desc_tx = desc; 1045 desc->callback = atmel_complete_tx_dma; 1046 desc->callback_param = atmel_port; 1047 atmel_port->cookie_tx = dmaengine_submit(desc); 1048 } 1049 1050 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1051 uart_write_wakeup(port); 1052 } 1053 1054 static int atmel_prepare_tx_dma(struct uart_port *port) 1055 { 1056 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1057 struct device *mfd_dev = port->dev->parent; 1058 dma_cap_mask_t mask; 1059 struct dma_slave_config config; 1060 int ret, nent; 1061 1062 dma_cap_zero(mask); 1063 dma_cap_set(DMA_SLAVE, mask); 1064 1065 atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx"); 1066 if (atmel_port->chan_tx == NULL) 1067 goto chan_err; 1068 dev_info(port->dev, "using %s for tx DMA transfers\n", 1069 dma_chan_name(atmel_port->chan_tx)); 1070 1071 spin_lock_init(&atmel_port->lock_tx); 1072 sg_init_table(&atmel_port->sg_tx, 1); 1073 /* UART circular tx buffer is an aligned page. */ 1074 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); 1075 sg_set_page(&atmel_port->sg_tx, 1076 virt_to_page(port->state->xmit.buf), 1077 UART_XMIT_SIZE, 1078 offset_in_page(port->state->xmit.buf)); 1079 nent = dma_map_sg(port->dev, 1080 &atmel_port->sg_tx, 1081 1, 1082 DMA_TO_DEVICE); 1083 1084 if (!nent) { 1085 dev_dbg(port->dev, "need to release resource of dma\n"); 1086 goto chan_err; 1087 } else { 1088 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1089 sg_dma_len(&atmel_port->sg_tx), 1090 port->state->xmit.buf, 1091 &sg_dma_address(&atmel_port->sg_tx)); 1092 } 1093 1094 /* Configure the slave DMA */ 1095 memset(&config, 0, sizeof(config)); 1096 config.direction = DMA_MEM_TO_DEV; 1097 config.dst_addr_width = (atmel_port->fifo_size) ? 
1098 DMA_SLAVE_BUSWIDTH_4_BYTES : 1099 DMA_SLAVE_BUSWIDTH_1_BYTE; 1100 config.dst_addr = port->mapbase + ATMEL_US_THR; 1101 config.dst_maxburst = 1; 1102 1103 ret = dmaengine_slave_config(atmel_port->chan_tx, 1104 &config); 1105 if (ret) { 1106 dev_err(port->dev, "DMA tx slave configuration failed\n"); 1107 goto chan_err; 1108 } 1109 1110 return 0; 1111 1112 chan_err: 1113 dev_err(port->dev, "TX channel not available, switch to pio\n"); 1114 atmel_port->use_dma_tx = 0; 1115 if (atmel_port->chan_tx) 1116 atmel_release_tx_dma(port); 1117 return -EINVAL; 1118 } 1119 1120 static void atmel_complete_rx_dma(void *arg) 1121 { 1122 struct uart_port *port = arg; 1123 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1124 1125 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1126 } 1127 1128 static void atmel_release_rx_dma(struct uart_port *port) 1129 { 1130 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1131 struct dma_chan *chan = atmel_port->chan_rx; 1132 1133 if (chan) { 1134 dmaengine_terminate_all(chan); 1135 dma_release_channel(chan); 1136 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1, 1137 DMA_FROM_DEVICE); 1138 } 1139 1140 atmel_port->desc_rx = NULL; 1141 atmel_port->chan_rx = NULL; 1142 atmel_port->cookie_rx = -EINVAL; 1143 } 1144 1145 static void atmel_rx_from_dma(struct uart_port *port) 1146 { 1147 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1148 struct tty_port *tport = &port->state->port; 1149 struct circ_buf *ring = &atmel_port->rx_ring; 1150 struct dma_chan *chan = atmel_port->chan_rx; 1151 struct dma_tx_state state; 1152 enum dma_status dmastat; 1153 size_t count; 1154 1155 1156 /* Reset the UART timeout early so that we don't miss one */ 1157 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1158 dmastat = dmaengine_tx_status(chan, 1159 atmel_port->cookie_rx, 1160 &state); 1161 /* Restart a new tasklet if DMA status is error */ 1162 if (dmastat == DMA_ERROR) { 1163 dev_dbg(port->dev, "Get residue error, restart tasklet\n"); 1164 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1165 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1166 return; 1167 } 1168 1169 /* CPU claims ownership of RX DMA buffer */ 1170 dma_sync_sg_for_cpu(port->dev, 1171 &atmel_port->sg_rx, 1172 1, 1173 DMA_FROM_DEVICE); 1174 1175 /* 1176 * ring->head points to the end of data already written by the DMA. 1177 * ring->tail points to the beginning of data to be read by the 1178 * framework. 1179 * The current transfer size should not be larger than the dma buffer 1180 * length. 1181 */ 1182 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue; 1183 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx)); 1184 /* 1185 * At this point ring->head may point to the first byte right after the 1186 * last byte of the dma buffer: 1187 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx) 1188 * 1189 * However ring->tail must always points inside the dma buffer: 1190 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1 1191 * 1192 * Since we use a ring buffer, we have to handle the case 1193 * where head is lower than tail. In such a case, we first read from 1194 * tail to the end of the buffer then reset tail. 
	 */
	if (ring->head < ring->tail) {
		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		ring->tail = 0;
		port->icount.rx += count;
	}

	/* Finally we read data from tail to head */
	if (ring->tail < ring->head) {
		count = ring->head - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		/* Wrap ring->head if needed */
		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
			ring->head = 0;
		ring->tail = ring->head;
		port->icount.rx += count;
	}

	/* USART retrieves ownership of RX DMA buffer */
	dma_sync_sg_for_device(port->dev,
			       &atmel_port->sg_rx,
			       1,
			       DMA_FROM_DEVICE);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}

static int atmel_prepare_rx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct device *mfd_dev = port->dev->parent;
	struct dma_async_tx_descriptor *desc;
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct circ_buf *ring;
	int ret, nent;

	ring = &atmel_port->rx_ring;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
	if (atmel_port->chan_rx == NULL)
		goto chan_err;
	dev_info(port->dev, "using %s for rx DMA transfers\n",
		dma_chan_name(atmel_port->chan_rx));

	spin_lock_init(&atmel_port->lock_rx);
	sg_init_table(&atmel_port->sg_rx, 1);
	/* UART circular rx buffer is an aligned page.
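	 * sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE is 4 KiB,
	 * i.e. exactly one page.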
*/ 1257 BUG_ON(!PAGE_ALIGNED(ring->buf)); 1258 sg_set_page(&atmel_port->sg_rx, 1259 virt_to_page(ring->buf), 1260 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, 1261 offset_in_page(ring->buf)); 1262 nent = dma_map_sg(port->dev, 1263 &atmel_port->sg_rx, 1264 1, 1265 DMA_FROM_DEVICE); 1266 1267 if (!nent) { 1268 dev_dbg(port->dev, "need to release resource of dma\n"); 1269 goto chan_err; 1270 } else { 1271 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1272 sg_dma_len(&atmel_port->sg_rx), 1273 ring->buf, 1274 &sg_dma_address(&atmel_port->sg_rx)); 1275 } 1276 1277 /* Configure the slave DMA */ 1278 memset(&config, 0, sizeof(config)); 1279 config.direction = DMA_DEV_TO_MEM; 1280 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1281 config.src_addr = port->mapbase + ATMEL_US_RHR; 1282 config.src_maxburst = 1; 1283 1284 ret = dmaengine_slave_config(atmel_port->chan_rx, 1285 &config); 1286 if (ret) { 1287 dev_err(port->dev, "DMA rx slave configuration failed\n"); 1288 goto chan_err; 1289 } 1290 /* 1291 * Prepare a cyclic dma transfer, assign 2 descriptors, 1292 * each one is half ring buffer size 1293 */ 1294 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx, 1295 sg_dma_address(&atmel_port->sg_rx), 1296 sg_dma_len(&atmel_port->sg_rx), 1297 sg_dma_len(&atmel_port->sg_rx)/2, 1298 DMA_DEV_TO_MEM, 1299 DMA_PREP_INTERRUPT); 1300 if (!desc) { 1301 dev_err(port->dev, "Preparing DMA cyclic failed\n"); 1302 goto chan_err; 1303 } 1304 desc->callback = atmel_complete_rx_dma; 1305 desc->callback_param = port; 1306 atmel_port->desc_rx = desc; 1307 atmel_port->cookie_rx = dmaengine_submit(desc); 1308 1309 return 0; 1310 1311 chan_err: 1312 dev_err(port->dev, "RX channel not available, switch to pio\n"); 1313 atmel_port->use_dma_rx = 0; 1314 if (atmel_port->chan_rx) 1315 atmel_release_rx_dma(port); 1316 return -EINVAL; 1317 } 1318 1319 static void atmel_uart_timer_callback(struct timer_list *t) 1320 { 1321 struct atmel_uart_port *atmel_port = from_timer(atmel_port, t, 1322 uart_timer); 1323 struct uart_port *port = &atmel_port->uart; 1324 1325 if (!atomic_read(&atmel_port->tasklet_shutdown)) { 1326 tasklet_schedule(&atmel_port->tasklet_rx); 1327 mod_timer(&atmel_port->uart_timer, 1328 jiffies + uart_poll_timeout(port)); 1329 } 1330 } 1331 1332 /* 1333 * receive interrupt handler. 1334 */ 1335 static void 1336 atmel_handle_receive(struct uart_port *port, unsigned int pending) 1337 { 1338 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1339 1340 if (atmel_use_pdc_rx(port)) { 1341 /* 1342 * PDC receive. Just schedule the tasklet and let it 1343 * figure out the details. 1344 * 1345 * TODO: We're not handling error flags correctly at 1346 * the moment. 1347 */ 1348 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { 1349 atmel_uart_writel(port, ATMEL_US_IDR, 1350 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)); 1351 atmel_tasklet_schedule(atmel_port, 1352 &atmel_port->tasklet_rx); 1353 } 1354 1355 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | 1356 ATMEL_US_FRAME | ATMEL_US_PARE)) 1357 atmel_pdc_rxerr(port, pending); 1358 } 1359 1360 if (atmel_use_dma_rx(port)) { 1361 if (pending & ATMEL_US_TIMEOUT) { 1362 atmel_uart_writel(port, ATMEL_US_IDR, 1363 ATMEL_US_TIMEOUT); 1364 atmel_tasklet_schedule(atmel_port, 1365 &atmel_port->tasklet_rx); 1366 } 1367 } 1368 1369 /* Interrupt receive */ 1370 if (pending & ATMEL_US_RXRDY) 1371 atmel_rx_chars(port); 1372 else if (pending & ATMEL_US_RXBRK) { 1373 /* 1374 * End of break detected. 
If it came along with a 1375 * character, atmel_rx_chars will handle it. 1376 */ 1377 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 1378 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK); 1379 atmel_port->break_active = 0; 1380 } 1381 } 1382 1383 /* 1384 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe) 1385 */ 1386 static void 1387 atmel_handle_transmit(struct uart_port *port, unsigned int pending) 1388 { 1389 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1390 1391 if (pending & atmel_port->tx_done_mask) { 1392 atmel_uart_writel(port, ATMEL_US_IDR, 1393 atmel_port->tx_done_mask); 1394 1395 /* Start RX if flag was set and FIFO is empty */ 1396 if (atmel_port->hd_start_rx) { 1397 if (!(atmel_uart_readl(port, ATMEL_US_CSR) 1398 & ATMEL_US_TXEMPTY)) 1399 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); 1400 1401 atmel_port->hd_start_rx = false; 1402 atmel_start_rx(port); 1403 return; 1404 } 1405 1406 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 1407 } 1408 } 1409 1410 /* 1411 * status flags interrupt handler. 1412 */ 1413 static void 1414 atmel_handle_status(struct uart_port *port, unsigned int pending, 1415 unsigned int status) 1416 { 1417 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1418 unsigned int status_change; 1419 1420 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC 1421 | ATMEL_US_CTSIC)) { 1422 status_change = status ^ atmel_port->irq_status_prev; 1423 atmel_port->irq_status_prev = status; 1424 1425 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR 1426 | ATMEL_US_DCD | ATMEL_US_CTS)) { 1427 /* TODO: All reads to CSR will clear these interrupts! */ 1428 if (status_change & ATMEL_US_RI) 1429 port->icount.rng++; 1430 if (status_change & ATMEL_US_DSR) 1431 port->icount.dsr++; 1432 if (status_change & ATMEL_US_DCD) 1433 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); 1434 if (status_change & ATMEL_US_CTS) 1435 uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); 1436 1437 wake_up_interruptible(&port->state->port.delta_msr_wait); 1438 } 1439 } 1440 1441 if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION)) 1442 dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending); 1443 } 1444 1445 /* 1446 * Interrupt handler 1447 */ 1448 static irqreturn_t atmel_interrupt(int irq, void *dev_id) 1449 { 1450 struct uart_port *port = dev_id; 1451 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1452 unsigned int status, pending, mask, pass_counter = 0; 1453 1454 spin_lock(&atmel_port->lock_suspended); 1455 1456 do { 1457 status = atmel_get_lines_status(port); 1458 mask = atmel_uart_readl(port, ATMEL_US_IMR); 1459 pending = status & mask; 1460 if (!pending) 1461 break; 1462 1463 if (atmel_port->suspended) { 1464 atmel_port->pending |= pending; 1465 atmel_port->pending_status = status; 1466 atmel_uart_writel(port, ATMEL_US_IDR, mask); 1467 pm_system_wakeup(); 1468 break; 1469 } 1470 1471 atmel_handle_receive(port, pending); 1472 atmel_handle_status(port, pending, status); 1473 atmel_handle_transmit(port, pending); 1474 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 1475 1476 spin_unlock(&atmel_port->lock_suspended); 1477 1478 return pass_counter ? 
IRQ_HANDLED : IRQ_NONE; 1479 } 1480 1481 static void atmel_release_tx_pdc(struct uart_port *port) 1482 { 1483 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1484 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1485 1486 dma_unmap_single(port->dev, 1487 pdc->dma_addr, 1488 pdc->dma_size, 1489 DMA_TO_DEVICE); 1490 } 1491 1492 /* 1493 * Called from tasklet with ENDTX and TXBUFE interrupts disabled. 1494 */ 1495 static void atmel_tx_pdc(struct uart_port *port) 1496 { 1497 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1498 struct circ_buf *xmit = &port->state->xmit; 1499 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1500 int count; 1501 1502 /* nothing left to transmit? */ 1503 if (atmel_uart_readl(port, ATMEL_PDC_TCR)) 1504 return; 1505 1506 xmit->tail += pdc->ofs; 1507 xmit->tail &= UART_XMIT_SIZE - 1; 1508 1509 port->icount.tx += pdc->ofs; 1510 pdc->ofs = 0; 1511 1512 /* more to transmit - setup next transfer */ 1513 1514 /* disable PDC transmit */ 1515 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 1516 1517 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 1518 dma_sync_single_for_device(port->dev, 1519 pdc->dma_addr, 1520 pdc->dma_size, 1521 DMA_TO_DEVICE); 1522 1523 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 1524 pdc->ofs = count; 1525 1526 atmel_uart_writel(port, ATMEL_PDC_TPR, 1527 pdc->dma_addr + xmit->tail); 1528 atmel_uart_writel(port, ATMEL_PDC_TCR, count); 1529 /* re-enable PDC transmit */ 1530 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 1531 /* Enable interrupts */ 1532 atmel_uart_writel(port, ATMEL_US_IER, 1533 atmel_port->tx_done_mask); 1534 } else { 1535 if (atmel_uart_is_half_duplex(port)) { 1536 /* DMA done, stop TX, start RX for RS485 */ 1537 atmel_start_rx(port); 1538 } 1539 } 1540 1541 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1542 uart_write_wakeup(port); 1543 } 1544 1545 static int atmel_prepare_tx_pdc(struct uart_port *port) 1546 { 1547 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1548 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1549 struct circ_buf *xmit = &port->state->xmit; 1550 1551 pdc->buf = xmit->buf; 1552 pdc->dma_addr = dma_map_single(port->dev, 1553 pdc->buf, 1554 UART_XMIT_SIZE, 1555 DMA_TO_DEVICE); 1556 pdc->dma_size = UART_XMIT_SIZE; 1557 pdc->ofs = 0; 1558 1559 return 0; 1560 } 1561 1562 static void atmel_rx_from_ring(struct uart_port *port) 1563 { 1564 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1565 struct circ_buf *ring = &atmel_port->rx_ring; 1566 unsigned int flg; 1567 unsigned int status; 1568 1569 while (ring->head != ring->tail) { 1570 struct atmel_uart_char c; 1571 1572 /* Make sure c is loaded after head. 
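		 * (It pairs with the smp_wmb() in atmel_buffer_rx_char().)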
*/ 1573 smp_rmb(); 1574 1575 c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; 1576 1577 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 1578 1579 port->icount.rx++; 1580 status = c.status; 1581 flg = TTY_NORMAL; 1582 1583 /* 1584 * note that the error handling code is 1585 * out of the main execution path 1586 */ 1587 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 1588 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { 1589 if (status & ATMEL_US_RXBRK) { 1590 /* ignore side-effect */ 1591 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 1592 1593 port->icount.brk++; 1594 if (uart_handle_break(port)) 1595 continue; 1596 } 1597 if (status & ATMEL_US_PARE) 1598 port->icount.parity++; 1599 if (status & ATMEL_US_FRAME) 1600 port->icount.frame++; 1601 if (status & ATMEL_US_OVRE) 1602 port->icount.overrun++; 1603 1604 status &= port->read_status_mask; 1605 1606 if (status & ATMEL_US_RXBRK) 1607 flg = TTY_BREAK; 1608 else if (status & ATMEL_US_PARE) 1609 flg = TTY_PARITY; 1610 else if (status & ATMEL_US_FRAME) 1611 flg = TTY_FRAME; 1612 } 1613 1614 1615 if (uart_handle_sysrq_char(port, c.ch)) 1616 continue; 1617 1618 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); 1619 } 1620 1621 /* 1622 * Drop the lock here since it might end up calling 1623 * uart_start(), which takes the lock. 1624 */ 1625 spin_unlock(&port->lock); 1626 tty_flip_buffer_push(&port->state->port); 1627 spin_lock(&port->lock); 1628 } 1629 1630 static void atmel_release_rx_pdc(struct uart_port *port) 1631 { 1632 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1633 int i; 1634 1635 for (i = 0; i < 2; i++) { 1636 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1637 1638 dma_unmap_single(port->dev, 1639 pdc->dma_addr, 1640 pdc->dma_size, 1641 DMA_FROM_DEVICE); 1642 kfree(pdc->buf); 1643 } 1644 } 1645 1646 static void atmel_rx_from_pdc(struct uart_port *port) 1647 { 1648 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1649 struct tty_port *tport = &port->state->port; 1650 struct atmel_dma_buffer *pdc; 1651 int rx_idx = atmel_port->pdc_rx_idx; 1652 unsigned int head; 1653 unsigned int tail; 1654 unsigned int count; 1655 1656 do { 1657 /* Reset the UART timeout early so that we don't miss one */ 1658 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1659 1660 pdc = &atmel_port->pdc_rx[rx_idx]; 1661 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr; 1662 tail = pdc->ofs; 1663 1664 /* If the PDC has switched buffers, RPR won't contain 1665 * any address within the current buffer. Since head 1666 * is unsigned, we just need a one-way comparison to 1667 * find out. 1668 * 1669 * In this case, we just need to consume the entire 1670 * buffer and resubmit it for DMA. This will clear the 1671 * ENDRX bit as well, so that we can safely re-enable 1672 * all interrupts below. 1673 */ 1674 head = min(head, pdc->dma_size); 1675 1676 if (likely(head != tail)) { 1677 dma_sync_single_for_cpu(port->dev, pdc->dma_addr, 1678 pdc->dma_size, DMA_FROM_DEVICE); 1679 1680 /* 1681 * head will only wrap around when we recycle 1682 * the DMA buffer, and when that happens, we 1683 * explicitly set tail to 0. So head will 1684 * always be greater than tail. 
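			 * (tail is pdc->ofs here; it is reset to 0 below when
			 * the buffer is recycled.)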
1685 */ 1686 count = head - tail; 1687 1688 tty_insert_flip_string(tport, pdc->buf + pdc->ofs, 1689 count); 1690 1691 dma_sync_single_for_device(port->dev, pdc->dma_addr, 1692 pdc->dma_size, DMA_FROM_DEVICE); 1693 1694 port->icount.rx += count; 1695 pdc->ofs = head; 1696 } 1697 1698 /* 1699 * If the current buffer is full, we need to check if 1700 * the next one contains any additional data. 1701 */ 1702 if (head >= pdc->dma_size) { 1703 pdc->ofs = 0; 1704 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr); 1705 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size); 1706 1707 rx_idx = !rx_idx; 1708 atmel_port->pdc_rx_idx = rx_idx; 1709 } 1710 } while (head >= pdc->dma_size); 1711 1712 /* 1713 * Drop the lock here since it might end up calling 1714 * uart_start(), which takes the lock. 1715 */ 1716 spin_unlock(&port->lock); 1717 tty_flip_buffer_push(tport); 1718 spin_lock(&port->lock); 1719 1720 atmel_uart_writel(port, ATMEL_US_IER, 1721 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1722 } 1723 1724 static int atmel_prepare_rx_pdc(struct uart_port *port) 1725 { 1726 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1727 int i; 1728 1729 for (i = 0; i < 2; i++) { 1730 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1731 1732 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); 1733 if (pdc->buf == NULL) { 1734 if (i != 0) { 1735 dma_unmap_single(port->dev, 1736 atmel_port->pdc_rx[0].dma_addr, 1737 PDC_BUFFER_SIZE, 1738 DMA_FROM_DEVICE); 1739 kfree(atmel_port->pdc_rx[0].buf); 1740 } 1741 atmel_port->use_pdc_rx = 0; 1742 return -ENOMEM; 1743 } 1744 pdc->dma_addr = dma_map_single(port->dev, 1745 pdc->buf, 1746 PDC_BUFFER_SIZE, 1747 DMA_FROM_DEVICE); 1748 pdc->dma_size = PDC_BUFFER_SIZE; 1749 pdc->ofs = 0; 1750 } 1751 1752 atmel_port->pdc_rx_idx = 0; 1753 1754 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr); 1755 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE); 1756 1757 atmel_uart_writel(port, ATMEL_PDC_RNPR, 1758 atmel_port->pdc_rx[1].dma_addr); 1759 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE); 1760 1761 return 0; 1762 } 1763 1764 /* 1765 * tasklet handling tty stuff outside the interrupt handler. 
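 * Both tasklets take port->lock themselves, since the interrupt handler
 * does not.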
1766 */ 1767 static void atmel_tasklet_rx_func(unsigned long data) 1768 { 1769 struct uart_port *port = (struct uart_port *)data; 1770 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1771 1772 /* The interrupt handler does not take the lock */ 1773 spin_lock(&port->lock); 1774 atmel_port->schedule_rx(port); 1775 spin_unlock(&port->lock); 1776 } 1777 1778 static void atmel_tasklet_tx_func(unsigned long data) 1779 { 1780 struct uart_port *port = (struct uart_port *)data; 1781 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1782 1783 /* The interrupt handler does not take the lock */ 1784 spin_lock(&port->lock); 1785 atmel_port->schedule_tx(port); 1786 spin_unlock(&port->lock); 1787 } 1788 1789 static void atmel_init_property(struct atmel_uart_port *atmel_port, 1790 struct platform_device *pdev) 1791 { 1792 struct device_node *np = pdev->dev.of_node; 1793 1794 /* DMA/PDC usage specification */ 1795 if (of_property_read_bool(np, "atmel,use-dma-rx")) { 1796 if (of_property_read_bool(np, "dmas")) { 1797 atmel_port->use_dma_rx = true; 1798 atmel_port->use_pdc_rx = false; 1799 } else { 1800 atmel_port->use_dma_rx = false; 1801 atmel_port->use_pdc_rx = true; 1802 } 1803 } else { 1804 atmel_port->use_dma_rx = false; 1805 atmel_port->use_pdc_rx = false; 1806 } 1807 1808 if (of_property_read_bool(np, "atmel,use-dma-tx")) { 1809 if (of_property_read_bool(np, "dmas")) { 1810 atmel_port->use_dma_tx = true; 1811 atmel_port->use_pdc_tx = false; 1812 } else { 1813 atmel_port->use_dma_tx = false; 1814 atmel_port->use_pdc_tx = true; 1815 } 1816 } else { 1817 atmel_port->use_dma_tx = false; 1818 atmel_port->use_pdc_tx = false; 1819 } 1820 } 1821 1822 static void atmel_set_ops(struct uart_port *port) 1823 { 1824 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1825 1826 if (atmel_use_dma_rx(port)) { 1827 atmel_port->prepare_rx = &atmel_prepare_rx_dma; 1828 atmel_port->schedule_rx = &atmel_rx_from_dma; 1829 atmel_port->release_rx = &atmel_release_rx_dma; 1830 } else if (atmel_use_pdc_rx(port)) { 1831 atmel_port->prepare_rx = &atmel_prepare_rx_pdc; 1832 atmel_port->schedule_rx = &atmel_rx_from_pdc; 1833 atmel_port->release_rx = &atmel_release_rx_pdc; 1834 } else { 1835 atmel_port->prepare_rx = NULL; 1836 atmel_port->schedule_rx = &atmel_rx_from_ring; 1837 atmel_port->release_rx = NULL; 1838 } 1839 1840 if (atmel_use_dma_tx(port)) { 1841 atmel_port->prepare_tx = &atmel_prepare_tx_dma; 1842 atmel_port->schedule_tx = &atmel_tx_dma; 1843 atmel_port->release_tx = &atmel_release_tx_dma; 1844 } else if (atmel_use_pdc_tx(port)) { 1845 atmel_port->prepare_tx = &atmel_prepare_tx_pdc; 1846 atmel_port->schedule_tx = &atmel_tx_pdc; 1847 atmel_port->release_tx = &atmel_release_tx_pdc; 1848 } else { 1849 atmel_port->prepare_tx = NULL; 1850 atmel_port->schedule_tx = &atmel_tx_chars; 1851 atmel_port->release_tx = NULL; 1852 } 1853 } 1854 1855 /* 1856 * Get ip name usart or uart 1857 */ 1858 static void atmel_get_ip_name(struct uart_port *port) 1859 { 1860 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1861 int name = atmel_uart_readl(port, ATMEL_US_NAME); 1862 u32 version; 1863 u32 usart, dbgu_uart, new_uart; 1864 /* ASCII decoding for IP version */ 1865 usart = 0x55534152; /* USAR(T) */ 1866 dbgu_uart = 0x44424755; /* DBGU */ 1867 new_uart = 0x55415254; /* UART */ 1868 1869 /* 1870 * Only USART devices from at91sam9260 SOC implement fractional 1871 * baudrate. 
It is available for all asynchronous modes, with the 1872 * following restriction: the sampling clock's duty cycle is not 1873 * constant. 1874 */ 1875 atmel_port->has_frac_baudrate = false; 1876 atmel_port->has_hw_timer = false; 1877 1878 if (name == new_uart) { 1879 dev_dbg(port->dev, "Uart with hw timer"); 1880 atmel_port->has_hw_timer = true; 1881 atmel_port->rtor = ATMEL_UA_RTOR; 1882 } else if (name == usart) { 1883 dev_dbg(port->dev, "Usart\n"); 1884 atmel_port->has_frac_baudrate = true; 1885 atmel_port->has_hw_timer = true; 1886 atmel_port->rtor = ATMEL_US_RTOR; 1887 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1888 switch (version) { 1889 case 0x814: /* sama5d2 */ 1890 /* fall through */ 1891 case 0x701: /* sama5d4 */ 1892 atmel_port->fidi_min = 3; 1893 atmel_port->fidi_max = 65535; 1894 break; 1895 case 0x502: /* sam9x5, sama5d3 */ 1896 atmel_port->fidi_min = 3; 1897 atmel_port->fidi_max = 2047; 1898 break; 1899 default: 1900 atmel_port->fidi_min = 1; 1901 atmel_port->fidi_max = 2047; 1902 } 1903 } else if (name == dbgu_uart) { 1904 dev_dbg(port->dev, "Dbgu or uart without hw timer\n"); 1905 } else { 1906 /* fallback for older SoCs: use version field */ 1907 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1908 switch (version) { 1909 case 0x302: 1910 case 0x10213: 1911 case 0x10302: 1912 dev_dbg(port->dev, "This version is usart\n"); 1913 atmel_port->has_frac_baudrate = true; 1914 atmel_port->has_hw_timer = true; 1915 atmel_port->rtor = ATMEL_US_RTOR; 1916 break; 1917 case 0x203: 1918 case 0x10202: 1919 dev_dbg(port->dev, "This version is uart\n"); 1920 break; 1921 default: 1922 dev_err(port->dev, "Not supported ip name nor version, set to uart\n"); 1923 } 1924 } 1925 } 1926 1927 /* 1928 * Perform initialization and enable port for reception 1929 */ 1930 static int atmel_startup(struct uart_port *port) 1931 { 1932 struct platform_device *pdev = to_platform_device(port->dev); 1933 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1934 int retval; 1935 1936 /* 1937 * Ensure that no interrupts are enabled otherwise when 1938 * request_irq() is called we could get stuck trying to 1939 * handle an unexpected interrupt 1940 */ 1941 atmel_uart_writel(port, ATMEL_US_IDR, -1); 1942 atmel_port->ms_irq_enabled = false; 1943 1944 /* 1945 * Allocate the IRQ 1946 */ 1947 retval = request_irq(port->irq, atmel_interrupt, 1948 IRQF_SHARED | IRQF_COND_SUSPEND, 1949 dev_name(&pdev->dev), port); 1950 if (retval) { 1951 dev_err(port->dev, "atmel_startup - Can't get irq\n"); 1952 return retval; 1953 } 1954 1955 atomic_set(&atmel_port->tasklet_shutdown, 0); 1956 tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func, 1957 (unsigned long)port); 1958 tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func, 1959 (unsigned long)port); 1960 1961 /* 1962 * Initialize DMA (if necessary) 1963 */ 1964 atmel_init_property(atmel_port, pdev); 1965 atmel_set_ops(port); 1966 1967 if (atmel_port->prepare_rx) { 1968 retval = atmel_port->prepare_rx(port); 1969 if (retval < 0) 1970 atmel_set_ops(port); 1971 } 1972 1973 if (atmel_port->prepare_tx) { 1974 retval = atmel_port->prepare_tx(port); 1975 if (retval < 0) 1976 atmel_set_ops(port); 1977 } 1978 1979 /* 1980 * Enable FIFO when available 1981 */ 1982 if (atmel_port->fifo_size) { 1983 unsigned int txrdym = ATMEL_US_ONE_DATA; 1984 unsigned int rxrdym = ATMEL_US_ONE_DATA; 1985 unsigned int fmr; 1986 1987 atmel_uart_writel(port, ATMEL_US_CR, 1988 ATMEL_US_FIFOEN | 1989 ATMEL_US_RXFCLR | 1990 ATMEL_US_TXFLCLR); 1991 1992 if 
(atmel_use_dma_tx(port)) 1993 txrdym = ATMEL_US_FOUR_DATA; 1994 1995 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym); 1996 if (atmel_port->rts_high && 1997 atmel_port->rts_low) 1998 fmr |= ATMEL_US_FRTSC | 1999 ATMEL_US_RXFTHRES(atmel_port->rts_high) | 2000 ATMEL_US_RXFTHRES2(atmel_port->rts_low); 2001 2002 atmel_uart_writel(port, ATMEL_US_FMR, fmr); 2003 } 2004 2005 /* Save current CSR for comparison in atmel_tasklet_func() */ 2006 atmel_port->irq_status_prev = atmel_get_lines_status(port); 2007 2008 /* 2009 * Finally, enable the serial port 2010 */ 2011 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2012 /* enable xmit & rcvr */ 2013 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2014 atmel_port->tx_stopped = false; 2015 2016 timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0); 2017 2018 if (atmel_use_pdc_rx(port)) { 2019 /* set UART timeout */ 2020 if (!atmel_port->has_hw_timer) { 2021 mod_timer(&atmel_port->uart_timer, 2022 jiffies + uart_poll_timeout(port)); 2023 /* set USART timeout */ 2024 } else { 2025 atmel_uart_writel(port, atmel_port->rtor, 2026 PDC_RX_TIMEOUT); 2027 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 2028 2029 atmel_uart_writel(port, ATMEL_US_IER, 2030 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 2031 } 2032 /* enable PDC controller */ 2033 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 2034 } else if (atmel_use_dma_rx(port)) { 2035 /* set UART timeout */ 2036 if (!atmel_port->has_hw_timer) { 2037 mod_timer(&atmel_port->uart_timer, 2038 jiffies + uart_poll_timeout(port)); 2039 /* set USART timeout */ 2040 } else { 2041 atmel_uart_writel(port, atmel_port->rtor, 2042 PDC_RX_TIMEOUT); 2043 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 2044 2045 atmel_uart_writel(port, ATMEL_US_IER, 2046 ATMEL_US_TIMEOUT); 2047 } 2048 } else { 2049 /* enable receive only */ 2050 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 2051 } 2052 2053 return 0; 2054 } 2055 2056 /* 2057 * Flush any TX data submitted for DMA. Called when the TX circular 2058 * buffer is reset. 2059 */ 2060 static void atmel_flush_buffer(struct uart_port *port) 2061 { 2062 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2063 2064 if (atmel_use_pdc_tx(port)) { 2065 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 2066 atmel_port->pdc_tx.ofs = 0; 2067 } 2068 /* 2069 * in uart_flush_buffer(), the xmit circular buffer has just 2070 * been cleared, so we have to reset tx_len accordingly. 2071 */ 2072 atmel_port->tx_len = 0; 2073 } 2074 2075 /* 2076 * Disable the port 2077 */ 2078 static void atmel_shutdown(struct uart_port *port) 2079 { 2080 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2081 2082 /* Disable modem control lines interrupts */ 2083 atmel_disable_ms(port); 2084 2085 /* Disable interrupts at device level */ 2086 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2087 2088 /* Prevent spurious interrupts from scheduling the tasklet */ 2089 atomic_inc(&atmel_port->tasklet_shutdown); 2090 2091 /* 2092 * Prevent any tasklets being scheduled during 2093 * cleanup 2094 */ 2095 del_timer_sync(&atmel_port->uart_timer); 2096 2097 /* Make sure that no interrupt is on the fly */ 2098 synchronize_irq(port->irq); 2099 2100 /* 2101 * Clear out any scheduled tasklets before 2102 * we destroy the buffers 2103 */ 2104 tasklet_kill(&atmel_port->tasklet_rx); 2105 tasklet_kill(&atmel_port->tasklet_tx); 2106 2107 /* 2108 * Ensure everything is stopped and 2109 * disable port and break condition. 
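* The timer, tasklets and interrupt handler have already been quiesced above, so it is now safe to stop the receiver and transmitter, release the DMA/PDC resources and reset the RX ring before the IRQ is freed.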
2110 */ 2111 atmel_stop_rx(port); 2112 atmel_stop_tx(port); 2113 2114 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 2115 2116 /* 2117 * Shut-down the DMA. 2118 */ 2119 if (atmel_port->release_rx) 2120 atmel_port->release_rx(port); 2121 if (atmel_port->release_tx) 2122 atmel_port->release_tx(port); 2123 2124 /* 2125 * Reset ring buffer pointers 2126 */ 2127 atmel_port->rx_ring.head = 0; 2128 atmel_port->rx_ring.tail = 0; 2129 2130 /* 2131 * Free the interrupts 2132 */ 2133 free_irq(port->irq, port); 2134 2135 atmel_flush_buffer(port); 2136 } 2137 2138 /* 2139 * Power / Clock management. 2140 */ 2141 static void atmel_serial_pm(struct uart_port *port, unsigned int state, 2142 unsigned int oldstate) 2143 { 2144 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2145 2146 switch (state) { 2147 case 0: 2148 /* 2149 * Enable the peripheral clock for this serial port. 2150 * This is called on uart_open() or a resume event. 2151 */ 2152 clk_prepare_enable(atmel_port->clk); 2153 2154 /* re-enable interrupts if we disabled some on suspend */ 2155 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); 2156 break; 2157 case 3: 2158 /* Back up the interrupt mask and disable all interrupts */ 2159 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); 2160 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2161 2162 /* 2163 * Disable the peripheral clock for this serial port. 2164 * This is called on uart_close() or a suspend event. 2165 */ 2166 clk_disable_unprepare(atmel_port->clk); 2167 break; 2168 default: 2169 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); 2170 } 2171 } 2172 2173 /* 2174 * Change the port parameters 2175 */ 2176 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, 2177 struct ktermios *old) 2178 { 2179 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2180 unsigned long flags; 2181 unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0; 2182 2183 /* save the current mode register */ 2184 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR); 2185 2186 /* reset the mode, clock divisor, parity, stop bits and data size */ 2187 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | 2188 ATMEL_US_PAR | ATMEL_US_USMODE); 2189 2190 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); 2191 2192 /* byte size */ 2193 switch (termios->c_cflag & CSIZE) { 2194 case CS5: 2195 mode |= ATMEL_US_CHRL_5; 2196 break; 2197 case CS6: 2198 mode |= ATMEL_US_CHRL_6; 2199 break; 2200 case CS7: 2201 mode |= ATMEL_US_CHRL_7; 2202 break; 2203 default: 2204 mode |= ATMEL_US_CHRL_8; 2205 break; 2206 } 2207 2208 /* stop bits */ 2209 if (termios->c_cflag & CSTOPB) 2210 mode |= ATMEL_US_NBSTOP_2; 2211 2212 /* parity */ 2213 if (termios->c_cflag & PARENB) { 2214 /* Mark or Space parity */ 2215 if (termios->c_cflag & CMSPAR) { 2216 if (termios->c_cflag & PARODD) 2217 mode |= ATMEL_US_PAR_MARK; 2218 else 2219 mode |= ATMEL_US_PAR_SPACE; 2220 } else if (termios->c_cflag & PARODD) 2221 mode |= ATMEL_US_PAR_ODD; 2222 else 2223 mode |= ATMEL_US_PAR_EVEN; 2224 } else 2225 mode |= ATMEL_US_PAR_NONE; 2226 2227 spin_lock_irqsave(&port->lock, flags); 2228 2229 port->read_status_mask = ATMEL_US_OVRE; 2230 if (termios->c_iflag & INPCK) 2231 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2232 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 2233 port->read_status_mask |= ATMEL_US_RXBRK; 2234 2235 if (atmel_use_pdc_rx(port)) 2236 /* need to enable error interrupts */ 2237 atmel_uart_writel(port, 
ATMEL_US_IER, port->read_status_mask); 2238 2239 /* 2240 * Characters to ignore 2241 */ 2242 port->ignore_status_mask = 0; 2243 if (termios->c_iflag & IGNPAR) 2244 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2245 if (termios->c_iflag & IGNBRK) { 2246 port->ignore_status_mask |= ATMEL_US_RXBRK; 2247 /* 2248 * If we're ignoring parity and break indicators, 2249 * ignore overruns too (for real raw support). 2250 */ 2251 if (termios->c_iflag & IGNPAR) 2252 port->ignore_status_mask |= ATMEL_US_OVRE; 2253 } 2254 /* TODO: Ignore all characters if CREAD is set.*/ 2255 2256 /* update the per-port timeout */ 2257 uart_update_timeout(port, termios->c_cflag, baud); 2258 2259 /* 2260 * save/disable interrupts. The tty layer will ensure that the 2261 * transmitter is empty if requested by the caller, so there's 2262 * no need to wait for it here. 2263 */ 2264 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2265 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2266 2267 /* disable receiver and transmitter */ 2268 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS); 2269 atmel_port->tx_stopped = true; 2270 2271 /* mode */ 2272 if (port->rs485.flags & SER_RS485_ENABLED) { 2273 atmel_uart_writel(port, ATMEL_US_TTGR, 2274 port->rs485.delay_rts_after_send); 2275 mode |= ATMEL_US_USMODE_RS485; 2276 } else if (port->iso7816.flags & SER_ISO7816_ENABLED) { 2277 atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg); 2278 /* select mck clock, and output */ 2279 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 2280 /* set max iterations */ 2281 mode |= ATMEL_US_MAX_ITER(3); 2282 if ((port->iso7816.flags & SER_ISO7816_T_PARAM) 2283 == SER_ISO7816_T(0)) 2284 mode |= ATMEL_US_USMODE_ISO7816_T0; 2285 else 2286 mode |= ATMEL_US_USMODE_ISO7816_T1; 2287 } else if (termios->c_cflag & CRTSCTS) { 2288 /* RS232 with hardware handshake (RTS/CTS) */ 2289 if (atmel_use_fifo(port) && 2290 !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { 2291 /* 2292 * with ATMEL_US_USMODE_HWHS set, the controller will 2293 * be able to drive the RTS pin high/low when the RX 2294 * FIFO is above RXFTHRES/below RXFTHRES2. 2295 * It will also disable the transmitter when the CTS 2296 * pin is high. 2297 * This mode is not activated if CTS pin is a GPIO 2298 * because in this case, the transmitter is always 2299 * disabled (there must be an internal pull-up 2300 * responsible for this behaviour). 2301 * If the RTS pin is a GPIO, the controller won't be 2302 * able to drive it according to the FIFO thresholds, 2303 * but it will be handled by the driver. 2304 */ 2305 mode |= ATMEL_US_USMODE_HWHS; 2306 } else { 2307 /* 2308 * For platforms without FIFO, the flow control is 2309 * handled by the driver. 
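* In that case the USART stays in normal mode and the driver handles RTS/CTS itself, using the modem-status interrupts (see atmel_enable_ms()) and atmel_set_mctrl(), possibly through the mctrl GPIOs.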
2310 */ 2311 mode |= ATMEL_US_USMODE_NORMAL; 2312 } 2313 } else { 2314 /* RS232 without hardware handshake */ 2315 mode |= ATMEL_US_USMODE_NORMAL; 2316 } 2317 2318 /* set the mode, clock divisor, parity, stop bits and data size */ 2319 atmel_uart_writel(port, ATMEL_US_MR, mode); 2320 2321 /* 2322 * when switching the mode, set the RTS line state according to the 2323 * new mode, otherwise keep the former state 2324 */ 2325 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { 2326 unsigned int rts_state; 2327 2328 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 2329 /* let the hardware control the RTS line */ 2330 rts_state = ATMEL_US_RTSDIS; 2331 } else { 2332 /* force RTS line to low level */ 2333 rts_state = ATMEL_US_RTSEN; 2334 } 2335 2336 atmel_uart_writel(port, ATMEL_US_CR, rts_state); 2337 } 2338 2339 /* 2340 * Set the baud rate: 2341 * Fractional baudrate allows setting up the output frequency more 2342 * accurately. This feature is enabled only when using normal mode; a worked example is sketched in a comment at the end of this file. 2343 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8)) 2344 * Currently, OVER is always set to 0 so we get 2345 * baudrate = selected clock / (16 * (CD + FP / 8)) 2346 * then 2347 * 8 CD + FP = selected clock / (2 * baudrate) 2348 */ 2349 if (atmel_port->has_frac_baudrate) { 2350 div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2); 2351 cd = div >> 3; 2352 fp = div & ATMEL_US_FP_MASK; 2353 } else { 2354 cd = uart_get_divisor(port, baud); 2355 } 2356 2357 if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */ 2358 cd /= 8; 2359 mode |= ATMEL_US_USCLKS_MCK_DIV8; 2360 } 2361 quot = cd | fp << ATMEL_US_FP_OFFSET; 2362 2363 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) 2364 atmel_uart_writel(port, ATMEL_US_BRGR, quot); 2365 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2366 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2367 atmel_port->tx_stopped = false; 2368 2369 /* restore interrupts */ 2370 atmel_uart_writel(port, ATMEL_US_IER, imr); 2371 2372 /* CTS flow-control and modem-status interrupts */ 2373 if (UART_ENABLE_MS(port, termios->c_cflag)) 2374 atmel_enable_ms(port); 2375 else 2376 atmel_disable_ms(port); 2377 2378 spin_unlock_irqrestore(&port->lock, flags); 2379 } 2380 2381 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios) 2382 { 2383 if (termios->c_line == N_PPS) { 2384 port->flags |= UPF_HARDPPS_CD; 2385 spin_lock_irq(&port->lock); 2386 atmel_enable_ms(port); 2387 spin_unlock_irq(&port->lock); 2388 } else { 2389 port->flags &= ~UPF_HARDPPS_CD; 2390 if (!UART_ENABLE_MS(port, termios->c_cflag)) { 2391 spin_lock_irq(&port->lock); 2392 atmel_disable_ms(port); 2393 spin_unlock_irq(&port->lock); 2394 } 2395 } 2396 } 2397 2398 /* 2399 * Return string describing the specified port 2400 */ 2401 static const char *atmel_type(struct uart_port *port) 2402 { 2403 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL; 2404 } 2405 2406 /* 2407 * Release the memory region(s) being used by 'port'. 2408 */ 2409 static void atmel_release_port(struct uart_port *port) 2410 { 2411 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2412 int size = resource_size(mpdev->resource); 2413 2414 release_mem_region(port->mapbase, size); 2415 2416 if (port->flags & UPF_IOREMAP) { 2417 iounmap(port->membase); 2418 port->membase = NULL; 2419 } 2420 } 2421 2422 /* 2423 * Request the memory region(s) being used by 'port'.
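* The region size is taken from the parent platform device's first memory resource; when UPF_IOREMAP is set the registers are also ioremapped here and unmapped again in atmel_release_port().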
2424 */ 2425 static int atmel_request_port(struct uart_port *port) 2426 { 2427 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2428 int size = resource_size(mpdev->resource); 2429 2430 if (!request_mem_region(port->mapbase, size, "atmel_serial")) 2431 return -EBUSY; 2432 2433 if (port->flags & UPF_IOREMAP) { 2434 port->membase = ioremap(port->mapbase, size); 2435 if (port->membase == NULL) { 2436 release_mem_region(port->mapbase, size); 2437 return -ENOMEM; 2438 } 2439 } 2440 2441 return 0; 2442 } 2443 2444 /* 2445 * Configure/autoconfigure the port. 2446 */ 2447 static void atmel_config_port(struct uart_port *port, int flags) 2448 { 2449 if (flags & UART_CONFIG_TYPE) { 2450 port->type = PORT_ATMEL; 2451 atmel_request_port(port); 2452 } 2453 } 2454 2455 /* 2456 * Verify the new serial_struct (for TIOCSSERIAL). 2457 */ 2458 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) 2459 { 2460 int ret = 0; 2461 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) 2462 ret = -EINVAL; 2463 if (port->irq != ser->irq) 2464 ret = -EINVAL; 2465 if (ser->io_type != SERIAL_IO_MEM) 2466 ret = -EINVAL; 2467 if (port->uartclk / 16 != ser->baud_base) 2468 ret = -EINVAL; 2469 if (port->mapbase != (unsigned long)ser->iomem_base) 2470 ret = -EINVAL; 2471 if (port->iobase != ser->port) 2472 ret = -EINVAL; 2473 if (ser->hub6 != 0) 2474 ret = -EINVAL; 2475 return ret; 2476 } 2477 2478 #ifdef CONFIG_CONSOLE_POLL 2479 static int atmel_poll_get_char(struct uart_port *port) 2480 { 2481 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY)) 2482 cpu_relax(); 2483 2484 return atmel_uart_read_char(port); 2485 } 2486 2487 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) 2488 { 2489 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2490 cpu_relax(); 2491 2492 atmel_uart_write_char(port, ch); 2493 } 2494 #endif 2495 2496 static const struct uart_ops atmel_pops = { 2497 .tx_empty = atmel_tx_empty, 2498 .set_mctrl = atmel_set_mctrl, 2499 .get_mctrl = atmel_get_mctrl, 2500 .stop_tx = atmel_stop_tx, 2501 .start_tx = atmel_start_tx, 2502 .stop_rx = atmel_stop_rx, 2503 .enable_ms = atmel_enable_ms, 2504 .break_ctl = atmel_break_ctl, 2505 .startup = atmel_startup, 2506 .shutdown = atmel_shutdown, 2507 .flush_buffer = atmel_flush_buffer, 2508 .set_termios = atmel_set_termios, 2509 .set_ldisc = atmel_set_ldisc, 2510 .type = atmel_type, 2511 .release_port = atmel_release_port, 2512 .request_port = atmel_request_port, 2513 .config_port = atmel_config_port, 2514 .verify_port = atmel_verify_port, 2515 .pm = atmel_serial_pm, 2516 #ifdef CONFIG_CONSOLE_POLL 2517 .poll_get_char = atmel_poll_get_char, 2518 .poll_put_char = atmel_poll_put_char, 2519 #endif 2520 }; 2521 2522 /* 2523 * Configure the port from the platform device resource info. 
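* The memory and IRQ resources come from the parent MFD device, the "usart" peripheral clock is looked up to get uartclk (and left disabled until the port is actually used), and tx_done_mask is chosen according to the transfer mode: TXEMPTY for RS485/ISO7816, ENDTX|TXBUFE for PDC, TXRDY otherwise.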
2524 */ 2525 static int atmel_init_port(struct atmel_uart_port *atmel_port, 2526 struct platform_device *pdev) 2527 { 2528 int ret; 2529 struct uart_port *port = &atmel_port->uart; 2530 struct platform_device *mpdev = to_platform_device(pdev->dev.parent); 2531 2532 atmel_init_property(atmel_port, pdev); 2533 atmel_set_ops(port); 2534 2535 uart_get_rs485_mode(&mpdev->dev, &port->rs485); 2536 2537 port->iotype = UPIO_MEM; 2538 port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; 2539 port->ops = &atmel_pops; 2540 port->fifosize = 1; 2541 port->dev = &pdev->dev; 2542 port->mapbase = mpdev->resource[0].start; 2543 port->irq = mpdev->resource[1].start; 2544 port->rs485_config = atmel_config_rs485; 2545 port->iso7816_config = atmel_config_iso7816; 2546 port->membase = NULL; 2547 2548 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); 2549 2550 /* for console, the clock could already be configured */ 2551 if (!atmel_port->clk) { 2552 atmel_port->clk = clk_get(&mpdev->dev, "usart"); 2553 if (IS_ERR(atmel_port->clk)) { 2554 ret = PTR_ERR(atmel_port->clk); 2555 atmel_port->clk = NULL; 2556 return ret; 2557 } 2558 ret = clk_prepare_enable(atmel_port->clk); 2559 if (ret) { 2560 clk_put(atmel_port->clk); 2561 atmel_port->clk = NULL; 2562 return ret; 2563 } 2564 port->uartclk = clk_get_rate(atmel_port->clk); 2565 clk_disable_unprepare(atmel_port->clk); 2566 /* only enable clock when USART is in use */ 2567 } 2568 2569 /* 2570 * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or 2571 * ENDTX|TXBUFE 2572 */ 2573 if (port->rs485.flags & SER_RS485_ENABLED || 2574 port->iso7816.flags & SER_ISO7816_ENABLED) 2575 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 2576 else if (atmel_use_pdc_tx(port)) { 2577 port->fifosize = PDC_BUFFER_SIZE; 2578 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; 2579 } else { 2580 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 2581 } 2582 2583 return 0; 2584 } 2585 2586 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2587 static void atmel_console_putchar(struct uart_port *port, int ch) 2588 { 2589 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2590 cpu_relax(); 2591 atmel_uart_write_char(port, ch); 2592 } 2593 2594 /* 2595 * Interrupts are disabled on entering 2596 */ 2597 static void atmel_console_write(struct console *co, const char *s, u_int count) 2598 { 2599 struct uart_port *port = &atmel_ports[co->index].uart; 2600 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2601 unsigned int status, imr; 2602 unsigned int pdc_tx; 2603 2604 /* 2605 * First, save IMR and then disable interrupts 2606 */ 2607 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2608 atmel_uart_writel(port, ATMEL_US_IDR, 2609 ATMEL_US_RXRDY | atmel_port->tx_done_mask); 2610 2611 /* Store PDC transmit status and disable it */ 2612 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2613 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2614 2615 /* Make sure that tx path is actually able to send characters */ 2616 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 2617 atmel_port->tx_stopped = false; 2618 2619 uart_console_write(port, s, count, atmel_console_putchar); 2620 2621 /* 2622 * Finally, wait for transmitter to become empty 2623 * and restore IMR 2624 */ 2625 do { 2626 status = atmel_uart_readl(port, ATMEL_US_CSR); 2627 } while (!(status & ATMEL_US_TXRDY)); 2628 2629 /* Restore PDC transmit status */ 2630 if (pdc_tx) 2631 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 2632 2633 /* set interrupts back the way they were */ 2634 
atmel_uart_writel(port, ATMEL_US_IER, imr); 2635 } 2636 2637 /* 2638 * If the port was already initialised (eg, by a boot loader), 2639 * try to determine the current setup. 2640 */ 2641 static void __init atmel_console_get_options(struct uart_port *port, int *baud, 2642 int *parity, int *bits) 2643 { 2644 unsigned int mr, quot; 2645 2646 /* 2647 * If the baud rate generator isn't running, the port wasn't 2648 * initialized by the boot loader. 2649 */ 2650 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD; 2651 if (!quot) 2652 return; 2653 2654 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; 2655 if (mr == ATMEL_US_CHRL_8) 2656 *bits = 8; 2657 else 2658 *bits = 7; 2659 2660 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; 2661 if (mr == ATMEL_US_PAR_EVEN) 2662 *parity = 'e'; 2663 else if (mr == ATMEL_US_PAR_ODD) 2664 *parity = 'o'; 2665 2666 /* 2667 * The serial core only rounds down when matching this to a 2668 * supported baud rate. Make sure we don't end up slightly 2669 * lower than one of those, as it would make us fall through 2670 * to a much lower baud rate than we really want. 2671 */ 2672 *baud = port->uartclk / (16 * (quot - 1)); 2673 } 2674 2675 static int __init atmel_console_setup(struct console *co, char *options) 2676 { 2677 int ret; 2678 struct uart_port *port = &atmel_ports[co->index].uart; 2679 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2680 int baud = 115200; 2681 int bits = 8; 2682 int parity = 'n'; 2683 int flow = 'n'; 2684 2685 if (port->membase == NULL) { 2686 /* Port not initialized yet - delay setup */ 2687 return -ENODEV; 2688 } 2689 2690 ret = clk_prepare_enable(atmel_ports[co->index].clk); 2691 if (ret) 2692 return ret; 2693 2694 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2695 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2696 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2697 atmel_port->tx_stopped = false; 2698 2699 if (options) 2700 uart_parse_options(options, &baud, &parity, &bits, &flow); 2701 else 2702 atmel_console_get_options(port, &baud, &parity, &bits); 2703 2704 return uart_set_options(port, co, baud, parity, bits, flow); 2705 } 2706 2707 static struct uart_driver atmel_uart; 2708 2709 static struct console atmel_console = { 2710 .name = ATMEL_DEVICENAME, 2711 .write = atmel_console_write, 2712 .device = uart_console_device, 2713 .setup = atmel_console_setup, 2714 .flags = CON_PRINTBUFFER, 2715 .index = -1, 2716 .data = &atmel_uart, 2717 }; 2718 2719 #define ATMEL_CONSOLE_DEVICE (&atmel_console) 2720 2721 static inline bool atmel_is_console_port(struct uart_port *port) 2722 { 2723 return port->cons && port->cons->index == port->line; 2724 } 2725 2726 #else 2727 #define ATMEL_CONSOLE_DEVICE NULL 2728 2729 static inline bool atmel_is_console_port(struct uart_port *port) 2730 { 2731 return false; 2732 } 2733 #endif 2734 2735 static struct uart_driver atmel_uart = { 2736 .owner = THIS_MODULE, 2737 .driver_name = "atmel_serial", 2738 .dev_name = ATMEL_DEVICENAME, 2739 .major = SERIAL_ATMEL_MAJOR, 2740 .minor = MINOR_START, 2741 .nr = ATMEL_MAX_UART, 2742 .cons = ATMEL_CONSOLE_DEVICE, 2743 }; 2744 2745 #ifdef CONFIG_PM 2746 static bool atmel_serial_clk_will_stop(void) 2747 { 2748 #ifdef CONFIG_ARCH_AT91 2749 return at91_suspend_entering_slow_clock(); 2750 #else 2751 return false; 2752 #endif 2753 } 2754 2755 static int atmel_serial_suspend(struct platform_device *pdev, 2756 pm_message_t state) 2757 { 2758 struct uart_port *port = platform_get_drvdata(pdev); 2759 
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2760 2761 if (atmel_is_console_port(port) && console_suspend_enabled) { 2762 /* Drain the TX shifter */ 2763 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & 2764 ATMEL_US_TXEMPTY)) 2765 cpu_relax(); 2766 } 2767 2768 if (atmel_is_console_port(port) && !console_suspend_enabled) { 2769 /* Cache register values as we won't get a full shutdown/startup 2770 * cycle 2771 */ 2772 atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR); 2773 atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR); 2774 atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 2775 atmel_port->cache.rtor = atmel_uart_readl(port, 2776 atmel_port->rtor); 2777 atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR); 2778 atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR); 2779 atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR); 2780 } 2781 2782 /* we can not wake up if we're running on slow clock */ 2783 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); 2784 if (atmel_serial_clk_will_stop()) { 2785 unsigned long flags; 2786 2787 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2788 atmel_port->suspended = true; 2789 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2790 device_set_wakeup_enable(&pdev->dev, 0); 2791 } 2792 2793 uart_suspend_port(&atmel_uart, port); 2794 2795 return 0; 2796 } 2797 2798 static int atmel_serial_resume(struct platform_device *pdev) 2799 { 2800 struct uart_port *port = platform_get_drvdata(pdev); 2801 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2802 unsigned long flags; 2803 2804 if (atmel_is_console_port(port) && !console_suspend_enabled) { 2805 atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr); 2806 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr); 2807 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr); 2808 atmel_uart_writel(port, atmel_port->rtor, 2809 atmel_port->cache.rtor); 2810 atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr); 2811 2812 if (atmel_port->fifo_size) { 2813 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN | 2814 ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR); 2815 atmel_uart_writel(port, ATMEL_US_FMR, 2816 atmel_port->cache.fmr); 2817 atmel_uart_writel(port, ATMEL_US_FIER, 2818 atmel_port->cache.fimr); 2819 } 2820 atmel_start_rx(port); 2821 } 2822 2823 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2824 if (atmel_port->pending) { 2825 atmel_handle_receive(port, atmel_port->pending); 2826 atmel_handle_status(port, atmel_port->pending, 2827 atmel_port->pending_status); 2828 atmel_handle_transmit(port, atmel_port->pending); 2829 atmel_port->pending = 0; 2830 } 2831 atmel_port->suspended = false; 2832 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2833 2834 uart_resume_port(&atmel_uart, port); 2835 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); 2836 2837 return 0; 2838 } 2839 #else 2840 #define atmel_serial_suspend NULL 2841 #define atmel_serial_resume NULL 2842 #endif 2843 2844 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port, 2845 struct platform_device *pdev) 2846 { 2847 atmel_port->fifo_size = 0; 2848 atmel_port->rts_low = 0; 2849 atmel_port->rts_high = 0; 2850 2851 if (of_property_read_u32(pdev->dev.of_node, 2852 "atmel,fifo-size", 2853 &atmel_port->fifo_size)) 2854 return; 2855 2856 if (!atmel_port->fifo_size) 2857 return; 2858 2859 if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) { 2860 atmel_port->fifo_size = 0; 2861 
dev_err(&pdev->dev, "Invalid FIFO size\n"); 2862 return; 2863 } 2864 2865 /* 2866 * 0 <= rts_low <= rts_high <= fifo_size 2867 * Once their CTS line asserted by the remote peer, some x86 UARTs tend 2868 * to flush their internal TX FIFO, commonly up to 16 data, before 2869 * actually stopping to send new data. So we try to set the RTS High 2870 * Threshold to a reasonably high value respecting this 16 data 2871 * empirical rule when possible. 2872 */ 2873 atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1, 2874 atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET); 2875 atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2, 2876 atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET); 2877 2878 dev_info(&pdev->dev, "Using FIFO (%u data)\n", 2879 atmel_port->fifo_size); 2880 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n", 2881 atmel_port->rts_high); 2882 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n", 2883 atmel_port->rts_low); 2884 } 2885 2886 static int atmel_serial_probe(struct platform_device *pdev) 2887 { 2888 struct atmel_uart_port *atmel_port; 2889 struct device_node *np = pdev->dev.parent->of_node; 2890 void *data; 2891 int ret = -ENODEV; 2892 bool rs485_enabled; 2893 2894 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); 2895 2896 /* 2897 * In device tree there is no node with "atmel,at91rm9200-usart-serial" 2898 * as compatible string. This driver is probed by at91-usart mfd driver 2899 * which is just a wrapper over the atmel_serial driver and 2900 * spi-at91-usart driver. All attributes needed by this driver are 2901 * found in of_node of parent. 2902 */ 2903 pdev->dev.of_node = np; 2904 2905 ret = of_alias_get_id(np, "serial"); 2906 if (ret < 0) 2907 /* port id not found in platform data nor device-tree aliases: 2908 * auto-enumerate it */ 2909 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); 2910 2911 if (ret >= ATMEL_MAX_UART) { 2912 ret = -ENODEV; 2913 goto err; 2914 } 2915 2916 if (test_and_set_bit(ret, atmel_ports_in_use)) { 2917 /* port already in use */ 2918 ret = -EBUSY; 2919 goto err; 2920 } 2921 2922 atmel_port = &atmel_ports[ret]; 2923 atmel_port->backup_imr = 0; 2924 atmel_port->uart.line = ret; 2925 atmel_serial_probe_fifos(atmel_port, pdev); 2926 2927 atomic_set(&atmel_port->tasklet_shutdown, 0); 2928 spin_lock_init(&atmel_port->lock_suspended); 2929 2930 ret = atmel_init_port(atmel_port, pdev); 2931 if (ret) 2932 goto err_clear_bit; 2933 2934 atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0); 2935 if (IS_ERR(atmel_port->gpios)) { 2936 ret = PTR_ERR(atmel_port->gpios); 2937 goto err_clear_bit; 2938 } 2939 2940 if (!atmel_use_pdc_rx(&atmel_port->uart)) { 2941 ret = -ENOMEM; 2942 data = kmalloc_array(ATMEL_SERIAL_RINGSIZE, 2943 sizeof(struct atmel_uart_char), 2944 GFP_KERNEL); 2945 if (!data) 2946 goto err_alloc_ring; 2947 atmel_port->rx_ring.buf = data; 2948 } 2949 2950 rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED; 2951 2952 ret = uart_add_one_port(&atmel_uart, &atmel_port->uart); 2953 if (ret) 2954 goto err_add_port; 2955 2956 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2957 if (atmel_is_console_port(&atmel_port->uart) 2958 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { 2959 /* 2960 * The serial core enabled the clock for us, so undo 2961 * the clk_prepare_enable() in atmel_console_setup() 2962 */ 2963 clk_disable_unprepare(atmel_port->clk); 2964 } 2965 #endif 2966 2967 device_init_wakeup(&pdev->dev, 1); 2968 platform_set_drvdata(pdev, atmel_port); 2969 2970 /* 2971 * The peripheral clock has been 
disabled by atmel_init_port(): 2972 * enable it before accessing I/O registers 2973 */ 2974 clk_prepare_enable(atmel_port->clk); 2975 2976 if (rs485_enabled) { 2977 atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR, 2978 ATMEL_US_USMODE_NORMAL); 2979 atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR, 2980 ATMEL_US_RTSEN); 2981 } 2982 2983 /* 2984 * Get port name of usart or uart 2985 */ 2986 atmel_get_ip_name(&atmel_port->uart); 2987 2988 /* 2989 * The peripheral clock can now safely be disabled till the port 2990 * is used 2991 */ 2992 clk_disable_unprepare(atmel_port->clk); 2993 2994 return 0; 2995 2996 err_add_port: 2997 kfree(atmel_port->rx_ring.buf); 2998 atmel_port->rx_ring.buf = NULL; 2999 err_alloc_ring: 3000 if (!atmel_is_console_port(&atmel_port->uart)) { 3001 clk_put(atmel_port->clk); 3002 atmel_port->clk = NULL; 3003 } 3004 err_clear_bit: 3005 clear_bit(atmel_port->uart.line, atmel_ports_in_use); 3006 err: 3007 return ret; 3008 } 3009 3010 /* 3011 * Even if the driver is not modular, it makes sense to be able to 3012 * unbind a device: there can be many bound devices, and there are 3013 * situations where dynamic binding and unbinding can be useful. 3014 * 3015 * For example, a connected device can require a specific firmware update 3016 * protocol that needs bitbanging on IO lines, but use the regular serial 3017 * port in the normal case. 3018 */ 3019 static int atmel_serial_remove(struct platform_device *pdev) 3020 { 3021 struct uart_port *port = platform_get_drvdata(pdev); 3022 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 3023 int ret = 0; 3024 3025 tasklet_kill(&atmel_port->tasklet_rx); 3026 tasklet_kill(&atmel_port->tasklet_tx); 3027 3028 device_init_wakeup(&pdev->dev, 0); 3029 3030 ret = uart_remove_one_port(&atmel_uart, port); 3031 3032 kfree(atmel_port->rx_ring.buf); 3033 3034 /* "port" is allocated statically, so we shouldn't free it */ 3035 3036 clear_bit(port->line, atmel_ports_in_use); 3037 3038 clk_put(atmel_port->clk); 3039 atmel_port->clk = NULL; 3040 pdev->dev.of_node = NULL; 3041 3042 return ret; 3043 } 3044 3045 static struct platform_driver atmel_serial_driver = { 3046 .probe = atmel_serial_probe, 3047 .remove = atmel_serial_remove, 3048 .suspend = atmel_serial_suspend, 3049 .resume = atmel_serial_resume, 3050 .driver = { 3051 .name = "atmel_usart_serial", 3052 .of_match_table = of_match_ptr(atmel_serial_dt_ids), 3053 }, 3054 }; 3055 3056 static int __init atmel_serial_init(void) 3057 { 3058 int ret; 3059 3060 ret = uart_register_driver(&atmel_uart); 3061 if (ret) 3062 return ret; 3063 3064 ret = platform_driver_register(&atmel_serial_driver); 3065 if (ret) 3066 uart_unregister_driver(&atmel_uart); 3067 3068 return ret; 3069 } 3070 device_initcall(atmel_serial_init); 3071
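/*
 * Illustrative sketch, not used by the driver: how atmel_set_termios()
 * derives the BRGR value when has_frac_baudrate is set.  With OVER = 0
 * the hardware generates
 *	baudrate = uartclk / (16 * (CD + FP / 8))
 * so the driver computes 8 * CD + FP = uartclk / (2 * baudrate).
 * The function name and the numbers below are only for illustration;
 * assuming uartclk = 66 MHz and 115200 baud:
 *	div = 66000000 / (2 * 115200) = 286 -> CD = 286 >> 3 = 35, FP = 286 & 7 = 6
 *	actual rate = 66000000 / (16 * (35 + 6.0 / 8)) ~= 115385 baud
 */
static unsigned int __maybe_unused
atmel_example_brgr_value(unsigned int uartclk, unsigned int baud)
{
	unsigned int div, cd, fp;

	div = DIV_ROUND_CLOSEST(uartclk, baud * 2);	/* 8 * CD + FP */
	cd = div >> 3;				/* integer part of the divisor */
	fp = div & ATMEL_US_FP_MASK;		/* fractional part, in 1/8 steps */

	/* value that atmel_set_termios() would write to ATMEL_US_BRGR */
	return cd | fp << ATMEL_US_FP_OFFSET;
}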