1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Driver for Atmel AT91 Serial ports 4 * Copyright (C) 2003 Rick Bronson 5 * 6 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd. 7 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 8 * 9 * DMA support added by Chip Coldwell. 10 */ 11 #include <linux/circ_buf.h> 12 #include <linux/tty.h> 13 #include <linux/ioport.h> 14 #include <linux/slab.h> 15 #include <linux/init.h> 16 #include <linux/serial.h> 17 #include <linux/clk.h> 18 #include <linux/console.h> 19 #include <linux/sysrq.h> 20 #include <linux/tty_flip.h> 21 #include <linux/platform_device.h> 22 #include <linux/of.h> 23 #include <linux/of_device.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/dmaengine.h> 26 #include <linux/atmel_pdc.h> 27 #include <linux/uaccess.h> 28 #include <linux/platform_data/atmel.h> 29 #include <linux/timer.h> 30 #include <linux/err.h> 31 #include <linux/irq.h> 32 #include <linux/suspend.h> 33 #include <linux/mm.h> 34 #include <linux/io.h> 35 36 #include <asm/div64.h> 37 #include <asm/ioctls.h> 38 39 #define PDC_BUFFER_SIZE 512 40 /* Revisit: We should calculate this based on the actual port settings */ 41 #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */ 42 43 /* The minimum number of data the FIFOs should be able to contain */ 44 #define ATMEL_MIN_FIFO_SIZE 8 45 /* 46 * These two offsets are subtracted from the RX FIFO size to define the RTS 47 * high and low thresholds 48 */ 49 #define ATMEL_RTS_HIGH_OFFSET 16 50 #define ATMEL_RTS_LOW_OFFSET 20 51 52 #include <linux/serial_core.h> 53 54 #include "serial_mctrl_gpio.h" 55 #include "atmel_serial.h" 56 57 static void atmel_start_rx(struct uart_port *port); 58 static void atmel_stop_rx(struct uart_port *port); 59 60 #ifdef CONFIG_SERIAL_ATMEL_TTYAT 61 62 /* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we 63 * should coexist with the 8250 driver, such as if we have an external 16C550 64 * UART. */ 65 #define SERIAL_ATMEL_MAJOR 204 66 #define MINOR_START 154 67 #define ATMEL_DEVICENAME "ttyAT" 68 69 #else 70 71 /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port 72 * name, but it is legally reserved for the 8250 driver. */ 73 #define SERIAL_ATMEL_MAJOR TTY_MAJOR 74 #define MINOR_START 64 75 #define ATMEL_DEVICENAME "ttyS" 76 77 #endif 78 79 #define ATMEL_ISR_PASS_LIMIT 256 80 81 struct atmel_dma_buffer { 82 unsigned char *buf; 83 dma_addr_t dma_addr; 84 unsigned int dma_size; 85 unsigned int ofs; 86 }; 87 88 struct atmel_uart_char { 89 u16 status; 90 u16 ch; 91 }; 92 93 /* 94 * Be careful, the real size of the ring buffer is 95 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that the ring buffer 96 * can contain up to 1024 characters in PIO mode and up to 4096 characters in 97 * DMA mode. 98 */ 99 #define ATMEL_SERIAL_RINGSIZE 1024 100 101 /* 102 * at91: 6 USARTs and one DBGU port (SAM9260) 103 * samx7: 3 USARTs and 5 UARTs 104 */ 105 #define ATMEL_MAX_UART 8 106 107 /* 108 * We wrap our port structure around the generic uart_port.
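 * The serial core only ever hands the driver a struct uart_port pointer, so
 * each callback recovers this private state with container_of(); see
 * to_atmel_uart_port() below.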
109 */ 110 struct atmel_uart_port { 111 struct uart_port uart; /* uart */ 112 struct clk *clk; /* uart clock */ 113 int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */ 114 u32 backup_imr; /* IMR saved during suspend */ 115 int break_active; /* break being received */ 116 117 bool use_dma_rx; /* enable DMA receiver */ 118 bool use_pdc_rx; /* enable PDC receiver */ 119 short pdc_rx_idx; /* current PDC RX buffer */ 120 struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */ 121 122 bool use_dma_tx; /* enable DMA transmitter */ 123 bool use_pdc_tx; /* enable PDC transmitter */ 124 struct atmel_dma_buffer pdc_tx; /* PDC transmitter */ 125 126 spinlock_t lock_tx; /* port lock */ 127 spinlock_t lock_rx; /* port lock */ 128 struct dma_chan *chan_tx; 129 struct dma_chan *chan_rx; 130 struct dma_async_tx_descriptor *desc_tx; 131 struct dma_async_tx_descriptor *desc_rx; 132 dma_cookie_t cookie_tx; 133 dma_cookie_t cookie_rx; 134 struct scatterlist sg_tx; 135 struct scatterlist sg_rx; 136 struct tasklet_struct tasklet_rx; 137 struct tasklet_struct tasklet_tx; 138 atomic_t tasklet_shutdown; 139 unsigned int irq_status_prev; 140 unsigned int tx_len; 141 142 struct circ_buf rx_ring; 143 144 struct mctrl_gpios *gpios; 145 u32 backup_mode; /* MR saved during iso7816 operations */ 146 u32 backup_brgr; /* BRGR saved during iso7816 operations */ 147 unsigned int tx_done_mask; 148 u32 fifo_size; 149 u32 rts_high; 150 u32 rts_low; 151 bool ms_irq_enabled; 152 u32 rtor; /* address of receiver timeout register if it exists */ 153 bool has_frac_baudrate; 154 bool has_hw_timer; 155 struct timer_list uart_timer; 156 157 bool tx_stopped; 158 bool suspended; 159 unsigned int pending; 160 unsigned int pending_status; 161 spinlock_t lock_suspended; 162 163 bool hd_start_rx; /* can start RX during half-duplex operation */ 164 165 /* ISO7816 */ 166 unsigned int fidi_min; 167 unsigned int fidi_max; 168 169 struct { 170 u32 cr; 171 u32 mr; 172 u32 imr; 173 u32 brgr; 174 u32 rtor; 175 u32 ttgr; 176 u32 fmr; 177 u32 fimr; 178 } cache; 179 180 int (*prepare_rx)(struct uart_port *port); 181 int (*prepare_tx)(struct uart_port *port); 182 void (*schedule_rx)(struct uart_port *port); 183 void (*schedule_tx)(struct uart_port *port); 184 void (*release_rx)(struct uart_port *port); 185 void (*release_tx)(struct uart_port *port); 186 }; 187 188 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; 189 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART); 190 191 #if defined(CONFIG_OF) 192 static const struct of_device_id atmel_serial_dt_ids[] = { 193 { .compatible = "atmel,at91rm9200-usart-serial" }, 194 { /* sentinel */ } 195 }; 196 #endif 197 198 static inline struct atmel_uart_port * 199 to_atmel_uart_port(struct uart_port *uart) 200 { 201 return container_of(uart, struct atmel_uart_port, uart); 202 } 203 204 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg) 205 { 206 return __raw_readl(port->membase + reg); 207 } 208 209 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value) 210 { 211 __raw_writel(value, port->membase + reg); 212 } 213 214 static inline u8 atmel_uart_read_char(struct uart_port *port) 215 { 216 return __raw_readb(port->membase + ATMEL_US_RHR); 217 } 218 219 static inline void atmel_uart_write_char(struct uart_port *port, u8 value) 220 { 221 __raw_writeb(value, port->membase + ATMEL_US_THR); 222 } 223 224 static inline int atmel_uart_is_half_duplex(struct uart_port *port) 225 { 226 return ((port->rs485.flags & SER_RS485_ENABLED)
&& 227 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || 228 (port->iso7816.flags & SER_ISO7816_ENABLED); 229 } 230 231 #ifdef CONFIG_SERIAL_ATMEL_PDC 232 static bool atmel_use_pdc_rx(struct uart_port *port) 233 { 234 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 235 236 return atmel_port->use_pdc_rx; 237 } 238 239 static bool atmel_use_pdc_tx(struct uart_port *port) 240 { 241 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 242 243 return atmel_port->use_pdc_tx; 244 } 245 #else 246 static bool atmel_use_pdc_rx(struct uart_port *port) 247 { 248 return false; 249 } 250 251 static bool atmel_use_pdc_tx(struct uart_port *port) 252 { 253 return false; 254 } 255 #endif 256 257 static bool atmel_use_dma_tx(struct uart_port *port) 258 { 259 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 260 261 return atmel_port->use_dma_tx; 262 } 263 264 static bool atmel_use_dma_rx(struct uart_port *port) 265 { 266 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 267 268 return atmel_port->use_dma_rx; 269 } 270 271 static bool atmel_use_fifo(struct uart_port *port) 272 { 273 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 274 275 return atmel_port->fifo_size; 276 } 277 278 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port, 279 struct tasklet_struct *t) 280 { 281 if (!atomic_read(&atmel_port->tasklet_shutdown)) 282 tasklet_schedule(t); 283 } 284 285 /* Enable or disable the rs485 support */ 286 static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios, 287 struct serial_rs485 *rs485conf) 288 { 289 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 290 unsigned int mode; 291 292 /* Disable interrupts */ 293 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 294 295 mode = atmel_uart_readl(port, ATMEL_US_MR); 296 297 if (rs485conf->flags & SER_RS485_ENABLED) { 298 dev_dbg(port->dev, "Setting UART to RS485\n"); 299 if (rs485conf->flags & SER_RS485_RX_DURING_TX) 300 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 301 else 302 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 303 304 atmel_uart_writel(port, ATMEL_US_TTGR, 305 rs485conf->delay_rts_after_send); 306 mode &= ~ATMEL_US_USMODE; 307 mode |= ATMEL_US_USMODE_RS485; 308 } else { 309 dev_dbg(port->dev, "Setting UART to RS232\n"); 310 if (atmel_use_pdc_tx(port)) 311 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 312 ATMEL_US_TXBUFE; 313 else 314 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 315 } 316 atmel_uart_writel(port, ATMEL_US_MR, mode); 317 318 /* Enable interrupts */ 319 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 320 321 return 0; 322 } 323 324 static unsigned int atmel_calc_cd(struct uart_port *port, 325 struct serial_iso7816 *iso7816conf) 326 { 327 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 328 unsigned int cd; 329 u64 mck_rate; 330 331 mck_rate = (u64)clk_get_rate(atmel_port->clk); 332 do_div(mck_rate, iso7816conf->clk); 333 cd = mck_rate; 334 return cd; 335 } 336 337 static unsigned int atmel_calc_fidi(struct uart_port *port, 338 struct serial_iso7816 *iso7816conf) 339 { 340 u64 fidi = 0; 341 342 if (iso7816conf->sc_fi && iso7816conf->sc_di) { 343 fidi = (u64)iso7816conf->sc_fi; 344 do_div(fidi, iso7816conf->sc_di); 345 } 346 return (u32)fidi; 347 } 348 349 /* Enable or disable the iso7816 support */ 350 /* Called with interrupts disabled */ 351 static int atmel_config_iso7816(struct uart_port *port, 352 struct serial_iso7816 *iso7816conf) 353 { 354 struct atmel_uart_port *atmel_port 
= to_atmel_uart_port(port); 355 unsigned int mode; 356 unsigned int cd, fidi; 357 int ret = 0; 358 359 /* Disable interrupts */ 360 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 361 362 mode = atmel_uart_readl(port, ATMEL_US_MR); 363 364 if (iso7816conf->flags & SER_ISO7816_ENABLED) { 365 mode &= ~ATMEL_US_USMODE; 366 367 if (iso7816conf->tg > 255) { 368 dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n"); 369 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 370 ret = -EINVAL; 371 goto err_out; 372 } 373 374 if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 375 == SER_ISO7816_T(0)) { 376 mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK; 377 } else if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 378 == SER_ISO7816_T(1)) { 379 mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK; 380 } else { 381 dev_err(port->dev, "ISO7816: Type not supported\n"); 382 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 383 ret = -EINVAL; 384 goto err_out; 385 } 386 387 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR); 388 389 /* select mck clock, and output */ 390 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 391 /* set parity for normal/inverse mode + max iterations */ 392 mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3); 393 394 cd = atmel_calc_cd(port, iso7816conf); 395 fidi = atmel_calc_fidi(port, iso7816conf); 396 if (fidi == 0) { 397 dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n"); 398 } else if (fidi < atmel_port->fidi_min 399 || fidi > atmel_port->fidi_max) { 400 dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi); 401 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 402 ret = -EINVAL; 403 goto err_out; 404 } 405 406 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) { 407 /* port not yet in iso7816 mode: store configuration */ 408 atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR); 409 atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 410 } 411 412 atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg); 413 atmel_uart_writel(port, ATMEL_US_BRGR, cd); 414 atmel_uart_writel(port, ATMEL_US_FIDI, fidi); 415 416 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN); 417 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION; 418 } else { 419 dev_dbg(port->dev, "Setting UART back to RS232\n"); 420 /* back to last RS232 settings */ 421 mode = atmel_port->backup_mode; 422 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 423 atmel_uart_writel(port, ATMEL_US_TTGR, 0); 424 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr); 425 atmel_uart_writel(port, ATMEL_US_FIDI, 0x174); 426 427 if (atmel_use_pdc_tx(port)) 428 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 429 ATMEL_US_TXBUFE; 430 else 431 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 432 } 433 434 port->iso7816 = *iso7816conf; 435 436 atmel_uart_writel(port, ATMEL_US_MR, mode); 437 438 err_out: 439 /* Enable interrupts */ 440 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 441 442 return ret; 443 } 444 445 /* 446 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. 447 */ 448 static u_int atmel_tx_empty(struct uart_port *port) 449 { 450 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 451 452 if (atmel_port->tx_stopped) 453 return TIOCSER_TEMT; 454 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ? 
455 TIOCSER_TEMT : 456 0; 457 } 458 459 /* 460 * Set state of the modem control output lines 461 */ 462 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) 463 { 464 unsigned int control = 0; 465 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR); 466 unsigned int rts_paused, rts_ready; 467 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 468 469 /* override mode to RS485 if needed, otherwise keep the current mode */ 470 if (port->rs485.flags & SER_RS485_ENABLED) { 471 atmel_uart_writel(port, ATMEL_US_TTGR, 472 port->rs485.delay_rts_after_send); 473 mode &= ~ATMEL_US_USMODE; 474 mode |= ATMEL_US_USMODE_RS485; 475 } 476 477 /* set the RTS line state according to the mode */ 478 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 479 /* force RTS line to high level */ 480 rts_paused = ATMEL_US_RTSEN; 481 482 /* give the control of the RTS line back to the hardware */ 483 rts_ready = ATMEL_US_RTSDIS; 484 } else { 485 /* force RTS line to high level */ 486 rts_paused = ATMEL_US_RTSDIS; 487 488 /* force RTS line to low level */ 489 rts_ready = ATMEL_US_RTSEN; 490 } 491 492 if (mctrl & TIOCM_RTS) 493 control |= rts_ready; 494 else 495 control |= rts_paused; 496 497 if (mctrl & TIOCM_DTR) 498 control |= ATMEL_US_DTREN; 499 else 500 control |= ATMEL_US_DTRDIS; 501 502 atmel_uart_writel(port, ATMEL_US_CR, control); 503 504 mctrl_gpio_set(atmel_port->gpios, mctrl); 505 506 /* Local loopback mode? */ 507 mode &= ~ATMEL_US_CHMODE; 508 if (mctrl & TIOCM_LOOP) 509 mode |= ATMEL_US_CHMODE_LOC_LOOP; 510 else 511 mode |= ATMEL_US_CHMODE_NORMAL; 512 513 atmel_uart_writel(port, ATMEL_US_MR, mode); 514 } 515 516 /* 517 * Get state of the modem control input lines 518 */ 519 static u_int atmel_get_mctrl(struct uart_port *port) 520 { 521 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 522 unsigned int ret = 0, status; 523 524 status = atmel_uart_readl(port, ATMEL_US_CSR); 525 526 /* 527 * The control signals are active low. 528 */ 529 if (!(status & ATMEL_US_DCD)) 530 ret |= TIOCM_CD; 531 if (!(status & ATMEL_US_CTS)) 532 ret |= TIOCM_CTS; 533 if (!(status & ATMEL_US_DSR)) 534 ret |= TIOCM_DSR; 535 if (!(status & ATMEL_US_RI)) 536 ret |= TIOCM_RI; 537 538 return mctrl_gpio_get(atmel_port->gpios, &ret); 539 } 540 541 /* 542 * Stop transmitting. 543 */ 544 static void atmel_stop_tx(struct uart_port *port) 545 { 546 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 547 548 if (atmel_use_pdc_tx(port)) { 549 /* disable PDC transmit */ 550 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 551 } 552 553 /* 554 * Disable the transmitter. 555 * This is mandatory when DMA is used, otherwise the DMA buffer 556 * is fully transmitted. 557 */ 558 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); 559 atmel_port->tx_stopped = true; 560 561 /* Disable interrupts */ 562 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 563 564 if (atmel_uart_is_half_duplex(port)) 565 if (!atomic_read(&atmel_port->tasklet_shutdown)) 566 atmel_start_rx(port); 567 568 } 569 570 /* 571 * Start transmitting. 572 */ 573 static void atmel_start_tx(struct uart_port *port) 574 { 575 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 576 577 if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) 578 & ATMEL_PDC_TXTEN)) 579 /* The transmitter is already running. 
Yes, we 580 really need this.*/ 581 return; 582 583 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) 584 if (atmel_uart_is_half_duplex(port)) 585 atmel_stop_rx(port); 586 587 if (atmel_use_pdc_tx(port)) 588 /* re-enable PDC transmit */ 589 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 590 591 /* Enable interrupts */ 592 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 593 594 /* re-enable the transmitter */ 595 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 596 atmel_port->tx_stopped = false; 597 } 598 599 /* 600 * start receiving - port is in process of being opened. 601 */ 602 static void atmel_start_rx(struct uart_port *port) 603 { 604 /* reset status and receiver */ 605 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 606 607 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN); 608 609 if (atmel_use_pdc_rx(port)) { 610 /* enable PDC controller */ 611 atmel_uart_writel(port, ATMEL_US_IER, 612 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 613 port->read_status_mask); 614 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 615 } else { 616 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 617 } 618 } 619 620 /* 621 * Stop receiving - port is in process of being closed. 622 */ 623 static void atmel_stop_rx(struct uart_port *port) 624 { 625 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS); 626 627 if (atmel_use_pdc_rx(port)) { 628 /* disable PDC receive */ 629 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS); 630 atmel_uart_writel(port, ATMEL_US_IDR, 631 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 632 port->read_status_mask); 633 } else { 634 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY); 635 } 636 } 637 638 /* 639 * Enable modem status interrupts 640 */ 641 static void atmel_enable_ms(struct uart_port *port) 642 { 643 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 644 uint32_t ier = 0; 645 646 /* 647 * Interrupt should not be enabled twice 648 */ 649 if (atmel_port->ms_irq_enabled) 650 return; 651 652 atmel_port->ms_irq_enabled = true; 653 654 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 655 ier |= ATMEL_US_CTSIC; 656 657 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 658 ier |= ATMEL_US_DSRIC; 659 660 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 661 ier |= ATMEL_US_RIIC; 662 663 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 664 ier |= ATMEL_US_DCDIC; 665 666 atmel_uart_writel(port, ATMEL_US_IER, ier); 667 668 mctrl_gpio_enable_ms(atmel_port->gpios); 669 } 670 671 /* 672 * Disable modem status interrupts 673 */ 674 static void atmel_disable_ms(struct uart_port *port) 675 { 676 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 677 uint32_t idr = 0; 678 679 /* 680 * Interrupt should not be disabled twice 681 */ 682 if (!atmel_port->ms_irq_enabled) 683 return; 684 685 atmel_port->ms_irq_enabled = false; 686 687 mctrl_gpio_disable_ms(atmel_port->gpios); 688 689 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 690 idr |= ATMEL_US_CTSIC; 691 692 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 693 idr |= ATMEL_US_DSRIC; 694 695 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 696 idr |= ATMEL_US_RIIC; 697 698 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 699 idr |= ATMEL_US_DCDIC; 700 701 atmel_uart_writel(port, ATMEL_US_IDR, idr); 702 } 703 704 /* 705 * Control the transmission of a break signal 706 */ 707 static void atmel_break_ctl(struct uart_port *port, int break_state) 708 { 709 if (break_state != 0) 
710 /* start break */ 711 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK); 712 else 713 /* stop break */ 714 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK); 715 } 716 717 /* 718 * Stores the incoming character in the ring buffer 719 */ 720 static void 721 atmel_buffer_rx_char(struct uart_port *port, unsigned int status, 722 unsigned int ch) 723 { 724 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 725 struct circ_buf *ring = &atmel_port->rx_ring; 726 struct atmel_uart_char *c; 727 728 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) 729 /* Buffer overflow, ignore char */ 730 return; 731 732 c = &((struct atmel_uart_char *)ring->buf)[ring->head]; 733 c->status = status; 734 c->ch = ch; 735 736 /* Make sure the character is stored before we update head. */ 737 smp_wmb(); 738 739 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 740 } 741 742 /* 743 * Deal with parity, framing and overrun errors. 744 */ 745 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) 746 { 747 /* clear error */ 748 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 749 750 if (status & ATMEL_US_RXBRK) { 751 /* ignore side-effect */ 752 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 753 port->icount.brk++; 754 } 755 if (status & ATMEL_US_PARE) 756 port->icount.parity++; 757 if (status & ATMEL_US_FRAME) 758 port->icount.frame++; 759 if (status & ATMEL_US_OVRE) 760 port->icount.overrun++; 761 } 762 763 /* 764 * Characters received (called from interrupt handler) 765 */ 766 static void atmel_rx_chars(struct uart_port *port) 767 { 768 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 769 unsigned int status, ch; 770 771 status = atmel_uart_readl(port, ATMEL_US_CSR); 772 while (status & ATMEL_US_RXRDY) { 773 ch = atmel_uart_read_char(port); 774 775 /* 776 * note that the error handling code is 777 * out of the main execution path 778 */ 779 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 780 | ATMEL_US_OVRE | ATMEL_US_RXBRK) 781 || atmel_port->break_active)) { 782 783 /* clear error */ 784 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 785 786 if (status & ATMEL_US_RXBRK 787 && !atmel_port->break_active) { 788 atmel_port->break_active = 1; 789 atmel_uart_writel(port, ATMEL_US_IER, 790 ATMEL_US_RXBRK); 791 } else { 792 /* 793 * This is either the end-of-break 794 * condition or we've received at 795 * least one character without RXBRK 796 * being set. In both cases, the next 797 * RXBRK will indicate start-of-break. 
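 * (The RXBRK interrupt enabled in the branch above guarantees that the
 * end of the break is noticed even if no further character follows; in
 * that case atmel_handle_receive() clears break_active instead.)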
798 */ 799 atmel_uart_writel(port, ATMEL_US_IDR, 800 ATMEL_US_RXBRK); 801 status &= ~ATMEL_US_RXBRK; 802 atmel_port->break_active = 0; 803 } 804 } 805 806 atmel_buffer_rx_char(port, status, ch); 807 status = atmel_uart_readl(port, ATMEL_US_CSR); 808 } 809 810 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 811 } 812 813 /* 814 * Transmit characters (called from tasklet with TXRDY interrupt 815 * disabled) 816 */ 817 static void atmel_tx_chars(struct uart_port *port) 818 { 819 struct circ_buf *xmit = &port->state->xmit; 820 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 821 822 if (port->x_char && 823 (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) { 824 atmel_uart_write_char(port, port->x_char); 825 port->icount.tx++; 826 port->x_char = 0; 827 } 828 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) 829 return; 830 831 while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) { 832 atmel_uart_write_char(port, xmit->buf[xmit->tail]); 833 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 834 port->icount.tx++; 835 if (uart_circ_empty(xmit)) 836 break; 837 } 838 839 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 840 uart_write_wakeup(port); 841 842 if (!uart_circ_empty(xmit)) { 843 /* we still have characters to transmit, so we should continue 844 * transmitting them when TX is ready, regardless of 845 * mode or duplexity 846 */ 847 atmel_port->tx_done_mask |= ATMEL_US_TXRDY; 848 849 /* Enable interrupts */ 850 atmel_uart_writel(port, ATMEL_US_IER, 851 atmel_port->tx_done_mask); 852 } else { 853 if (atmel_uart_is_half_duplex(port)) 854 atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY; 855 } 856 } 857 858 static void atmel_complete_tx_dma(void *arg) 859 { 860 struct atmel_uart_port *atmel_port = arg; 861 struct uart_port *port = &atmel_port->uart; 862 struct circ_buf *xmit = &port->state->xmit; 863 struct dma_chan *chan = atmel_port->chan_tx; 864 unsigned long flags; 865 866 spin_lock_irqsave(&port->lock, flags); 867 868 if (chan) 869 dmaengine_terminate_all(chan); 870 xmit->tail += atmel_port->tx_len; 871 xmit->tail &= UART_XMIT_SIZE - 1; 872 873 port->icount.tx += atmel_port->tx_len; 874 875 spin_lock_irq(&atmel_port->lock_tx); 876 async_tx_ack(atmel_port->desc_tx); 877 atmel_port->cookie_tx = -EINVAL; 878 atmel_port->desc_tx = NULL; 879 spin_unlock_irq(&atmel_port->lock_tx); 880 881 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 882 uart_write_wakeup(port); 883 884 /* 885 * xmit is a circular buffer so, if we have just sent data from 886 * xmit->tail to the end of xmit->buf, now we have to transmit the 887 * remaining data from the beginning of xmit->buf to xmit->head.
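 * For example, assuming UART_XMIT_SIZE is 4096 (one page), with tail == 4000
 * and head == 100 the descriptor that just completed only covered bytes
 * 4000..4095, so the tasklet is scheduled again below to queue bytes 0..99.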
888 */ 889 if (!uart_circ_empty(xmit)) 890 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 891 else if (atmel_uart_is_half_duplex(port)) { 892 /* 893 * DMA done, re-enable TXEMPTY and signal that we can stop 894 * TX and start RX for RS485 895 */ 896 atmel_port->hd_start_rx = true; 897 atmel_uart_writel(port, ATMEL_US_IER, 898 atmel_port->tx_done_mask); 899 } 900 901 spin_unlock_irqrestore(&port->lock, flags); 902 } 903 904 static void atmel_release_tx_dma(struct uart_port *port) 905 { 906 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 907 struct dma_chan *chan = atmel_port->chan_tx; 908 909 if (chan) { 910 dmaengine_terminate_all(chan); 911 dma_release_channel(chan); 912 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1, 913 DMA_TO_DEVICE); 914 } 915 916 atmel_port->desc_tx = NULL; 917 atmel_port->chan_tx = NULL; 918 atmel_port->cookie_tx = -EINVAL; 919 } 920 921 /* 922 * Called from tasklet with TXRDY interrupt is disabled. 923 */ 924 static void atmel_tx_dma(struct uart_port *port) 925 { 926 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 927 struct circ_buf *xmit = &port->state->xmit; 928 struct dma_chan *chan = atmel_port->chan_tx; 929 struct dma_async_tx_descriptor *desc; 930 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx; 931 unsigned int tx_len, part1_len, part2_len, sg_len; 932 dma_addr_t phys_addr; 933 934 /* Make sure we have an idle channel */ 935 if (atmel_port->desc_tx != NULL) 936 return; 937 938 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 939 /* 940 * DMA is idle now. 941 * Port xmit buffer is already mapped, 942 * and it is one page... Just adjust 943 * offsets and lengths. Since it is a circular buffer, 944 * we have to transmit till the end, and then the rest. 945 * Take the port lock to get a 946 * consistent xmit buffer state. 
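 * When the FIFO is enabled, the transfer below is additionally split
 * into a 4-byte aligned part and a byte-wise remainder: e.g. a tx_len
 * of 10 gives part1_len == 8 and part2_len == 2.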
947 */ 948 tx_len = CIRC_CNT_TO_END(xmit->head, 949 xmit->tail, 950 UART_XMIT_SIZE); 951 952 if (atmel_port->fifo_size) { 953 /* multi data mode */ 954 part1_len = (tx_len & ~0x3); /* DWORD access */ 955 part2_len = (tx_len & 0x3); /* BYTE access */ 956 } else { 957 /* single data (legacy) mode */ 958 part1_len = 0; 959 part2_len = tx_len; /* BYTE access only */ 960 } 961 962 sg_init_table(sgl, 2); 963 sg_len = 0; 964 phys_addr = sg_dma_address(sg_tx) + xmit->tail; 965 if (part1_len) { 966 sg = &sgl[sg_len++]; 967 sg_dma_address(sg) = phys_addr; 968 sg_dma_len(sg) = part1_len; 969 970 phys_addr += part1_len; 971 } 972 973 if (part2_len) { 974 sg = &sgl[sg_len++]; 975 sg_dma_address(sg) = phys_addr; 976 sg_dma_len(sg) = part2_len; 977 } 978 979 /* 980 * save tx_len so atmel_complete_tx_dma() will increase 981 * xmit->tail correctly 982 */ 983 atmel_port->tx_len = tx_len; 984 985 desc = dmaengine_prep_slave_sg(chan, 986 sgl, 987 sg_len, 988 DMA_MEM_TO_DEV, 989 DMA_PREP_INTERRUPT | 990 DMA_CTRL_ACK); 991 if (!desc) { 992 dev_err(port->dev, "Failed to send via dma!\n"); 993 return; 994 } 995 996 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE); 997 998 atmel_port->desc_tx = desc; 999 desc->callback = atmel_complete_tx_dma; 1000 desc->callback_param = atmel_port; 1001 atmel_port->cookie_tx = dmaengine_submit(desc); 1002 if (dma_submit_error(atmel_port->cookie_tx)) { 1003 dev_err(port->dev, "dma_submit_error %d\n", 1004 atmel_port->cookie_tx); 1005 return; 1006 } 1007 1008 dma_async_issue_pending(chan); 1009 } 1010 1011 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1012 uart_write_wakeup(port); 1013 } 1014 1015 static int atmel_prepare_tx_dma(struct uart_port *port) 1016 { 1017 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1018 struct device *mfd_dev = port->dev->parent; 1019 dma_cap_mask_t mask; 1020 struct dma_slave_config config; 1021 int ret, nent; 1022 1023 dma_cap_zero(mask); 1024 dma_cap_set(DMA_SLAVE, mask); 1025 1026 atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx"); 1027 if (atmel_port->chan_tx == NULL) 1028 goto chan_err; 1029 dev_info(port->dev, "using %s for tx DMA transfers\n", 1030 dma_chan_name(atmel_port->chan_tx)); 1031 1032 spin_lock_init(&atmel_port->lock_tx); 1033 sg_init_table(&atmel_port->sg_tx, 1); 1034 /* UART circular tx buffer is an aligned page. */ 1035 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); 1036 sg_set_page(&atmel_port->sg_tx, 1037 virt_to_page(port->state->xmit.buf), 1038 UART_XMIT_SIZE, 1039 offset_in_page(port->state->xmit.buf)); 1040 nent = dma_map_sg(port->dev, 1041 &atmel_port->sg_tx, 1042 1, 1043 DMA_TO_DEVICE); 1044 1045 if (!nent) { 1046 dev_dbg(port->dev, "need to release resource of dma\n"); 1047 goto chan_err; 1048 } else { 1049 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1050 sg_dma_len(&atmel_port->sg_tx), 1051 port->state->xmit.buf, 1052 &sg_dma_address(&atmel_port->sg_tx)); 1053 } 1054 1055 /* Configure the slave DMA */ 1056 memset(&config, 0, sizeof(config)); 1057 config.direction = DMA_MEM_TO_DEV; 1058 config.dst_addr_width = (atmel_port->fifo_size) ? 
1059 DMA_SLAVE_BUSWIDTH_4_BYTES : 1060 DMA_SLAVE_BUSWIDTH_1_BYTE; 1061 config.dst_addr = port->mapbase + ATMEL_US_THR; 1062 config.dst_maxburst = 1; 1063 1064 ret = dmaengine_slave_config(atmel_port->chan_tx, 1065 &config); 1066 if (ret) { 1067 dev_err(port->dev, "DMA tx slave configuration failed\n"); 1068 goto chan_err; 1069 } 1070 1071 return 0; 1072 1073 chan_err: 1074 dev_err(port->dev, "TX channel not available, switch to pio\n"); 1075 atmel_port->use_dma_tx = false; 1076 if (atmel_port->chan_tx) 1077 atmel_release_tx_dma(port); 1078 return -EINVAL; 1079 } 1080 1081 static void atmel_complete_rx_dma(void *arg) 1082 { 1083 struct uart_port *port = arg; 1084 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1085 1086 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1087 } 1088 1089 static void atmel_release_rx_dma(struct uart_port *port) 1090 { 1091 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1092 struct dma_chan *chan = atmel_port->chan_rx; 1093 1094 if (chan) { 1095 dmaengine_terminate_all(chan); 1096 dma_release_channel(chan); 1097 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1, 1098 DMA_FROM_DEVICE); 1099 } 1100 1101 atmel_port->desc_rx = NULL; 1102 atmel_port->chan_rx = NULL; 1103 atmel_port->cookie_rx = -EINVAL; 1104 } 1105 1106 static void atmel_rx_from_dma(struct uart_port *port) 1107 { 1108 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1109 struct tty_port *tport = &port->state->port; 1110 struct circ_buf *ring = &atmel_port->rx_ring; 1111 struct dma_chan *chan = atmel_port->chan_rx; 1112 struct dma_tx_state state; 1113 enum dma_status dmastat; 1114 size_t count; 1115 1116 1117 /* Reset the UART timeout early so that we don't miss one */ 1118 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1119 dmastat = dmaengine_tx_status(chan, 1120 atmel_port->cookie_rx, 1121 &state); 1122 /* Restart a new tasklet if DMA status is error */ 1123 if (dmastat == DMA_ERROR) { 1124 dev_dbg(port->dev, "Get residue error, restart tasklet\n"); 1125 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1126 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1127 return; 1128 } 1129 1130 /* CPU claims ownership of RX DMA buffer */ 1131 dma_sync_sg_for_cpu(port->dev, 1132 &atmel_port->sg_rx, 1133 1, 1134 DMA_FROM_DEVICE); 1135 1136 /* 1137 * ring->head points to the end of data already written by the DMA. 1138 * ring->tail points to the beginning of data to be read by the 1139 * framework. 1140 * The current transfer size should not be larger than the dma buffer 1141 * length. 1142 */ 1143 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue; 1144 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx)); 1145 /* 1146 * At this point ring->head may point to the first byte right after the 1147 * last byte of the dma buffer: 1148 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx) 1149 * 1150 * However ring->tail must always points inside the dma buffer: 1151 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1 1152 * 1153 * Since we use a ring buffer, we have to handle the case 1154 * where head is lower than tail. In such a case, we first read from 1155 * tail to the end of the buffer then reset tail. 
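 * For example, with a 4096 byte ring (sizeof(struct atmel_uart_char) *
 * ATMEL_SERIAL_RINGSIZE), tail == 4000 and head == 100: the first copy
 * below pushes bytes 4000..4095 to the tty layer and resets tail to 0,
 * the second copy pushes bytes 0..99 and leaves tail == head.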
1156 */ 1157 if (ring->head < ring->tail) { 1158 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail; 1159 1160 tty_insert_flip_string(tport, ring->buf + ring->tail, count); 1161 ring->tail = 0; 1162 port->icount.rx += count; 1163 } 1164 1165 /* Finally we read data from tail to head */ 1166 if (ring->tail < ring->head) { 1167 count = ring->head - ring->tail; 1168 1169 tty_insert_flip_string(tport, ring->buf + ring->tail, count); 1170 /* Wrap ring->head if needed */ 1171 if (ring->head >= sg_dma_len(&atmel_port->sg_rx)) 1172 ring->head = 0; 1173 ring->tail = ring->head; 1174 port->icount.rx += count; 1175 } 1176 1177 /* USART retrieves ownership of RX DMA buffer */ 1178 dma_sync_sg_for_device(port->dev, 1179 &atmel_port->sg_rx, 1180 1, 1181 DMA_FROM_DEVICE); 1182 1183 tty_flip_buffer_push(tport); 1184 1185 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1186 } 1187 1188 static int atmel_prepare_rx_dma(struct uart_port *port) 1189 { 1190 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1191 struct device *mfd_dev = port->dev->parent; 1192 struct dma_async_tx_descriptor *desc; 1193 dma_cap_mask_t mask; 1194 struct dma_slave_config config; 1195 struct circ_buf *ring; 1196 int ret, nent; 1197 1198 ring = &atmel_port->rx_ring; 1199 1200 dma_cap_zero(mask); 1201 dma_cap_set(DMA_CYCLIC, mask); 1202 1203 atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx"); 1204 if (atmel_port->chan_rx == NULL) 1205 goto chan_err; 1206 dev_info(port->dev, "using %s for rx DMA transfers\n", 1207 dma_chan_name(atmel_port->chan_rx)); 1208 1209 spin_lock_init(&atmel_port->lock_rx); 1210 sg_init_table(&atmel_port->sg_rx, 1); 1211 /* UART circular rx buffer is an aligned page. */ 1212 BUG_ON(!PAGE_ALIGNED(ring->buf)); 1213 sg_set_page(&atmel_port->sg_rx, 1214 virt_to_page(ring->buf), 1215 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, 1216 offset_in_page(ring->buf)); 1217 nent = dma_map_sg(port->dev, 1218 &atmel_port->sg_rx, 1219 1, 1220 DMA_FROM_DEVICE); 1221 1222 if (!nent) { 1223 dev_dbg(port->dev, "need to release resource of dma\n"); 1224 goto chan_err; 1225 } else { 1226 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1227 sg_dma_len(&atmel_port->sg_rx), 1228 ring->buf, 1229 &sg_dma_address(&atmel_port->sg_rx)); 1230 } 1231 1232 /* Configure the slave DMA */ 1233 memset(&config, 0, sizeof(config)); 1234 config.direction = DMA_DEV_TO_MEM; 1235 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1236 config.src_addr = port->mapbase + ATMEL_US_RHR; 1237 config.src_maxburst = 1; 1238 1239 ret = dmaengine_slave_config(atmel_port->chan_rx, 1240 &config); 1241 if (ret) { 1242 dev_err(port->dev, "DMA rx slave configuration failed\n"); 1243 goto chan_err; 1244 } 1245 /* 1246 * Prepare a cyclic dma transfer, assign 2 descriptors, 1247 * each one is half ring buffer size 1248 */ 1249 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx, 1250 sg_dma_address(&atmel_port->sg_rx), 1251 sg_dma_len(&atmel_port->sg_rx), 1252 sg_dma_len(&atmel_port->sg_rx)/2, 1253 DMA_DEV_TO_MEM, 1254 DMA_PREP_INTERRUPT); 1255 if (!desc) { 1256 dev_err(port->dev, "Preparing DMA cyclic failed\n"); 1257 goto chan_err; 1258 } 1259 desc->callback = atmel_complete_rx_dma; 1260 desc->callback_param = port; 1261 atmel_port->desc_rx = desc; 1262 atmel_port->cookie_rx = dmaengine_submit(desc); 1263 if (dma_submit_error(atmel_port->cookie_rx)) { 1264 dev_err(port->dev, "dma_submit_error %d\n", 1265 atmel_port->cookie_rx); 1266 goto chan_err; 1267 } 1268 1269 dma_async_issue_pending(atmel_port->chan_rx); 1270
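/*
 * From now on the DMA engine keeps filling the RX ring on its own:
 * atmel_complete_rx_dma() runs once per half-ring period and schedules
 * the RX tasklet, while the receiver timeout interrupt catches the tail
 * of slower transfers.
 */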
1271 return 0; 1272 1273 chan_err: 1274 dev_err(port->dev, "RX channel not available, switch to pio\n"); 1275 atmel_port->use_dma_rx = false; 1276 if (atmel_port->chan_rx) 1277 atmel_release_rx_dma(port); 1278 return -EINVAL; 1279 } 1280 1281 static void atmel_uart_timer_callback(struct timer_list *t) 1282 { 1283 struct atmel_uart_port *atmel_port = from_timer(atmel_port, t, 1284 uart_timer); 1285 struct uart_port *port = &atmel_port->uart; 1286 1287 if (!atomic_read(&atmel_port->tasklet_shutdown)) { 1288 tasklet_schedule(&atmel_port->tasklet_rx); 1289 mod_timer(&atmel_port->uart_timer, 1290 jiffies + uart_poll_timeout(port)); 1291 } 1292 } 1293 1294 /* 1295 * receive interrupt handler. 1296 */ 1297 static void 1298 atmel_handle_receive(struct uart_port *port, unsigned int pending) 1299 { 1300 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1301 1302 if (atmel_use_pdc_rx(port)) { 1303 /* 1304 * PDC receive. Just schedule the tasklet and let it 1305 * figure out the details. 1306 * 1307 * TODO: We're not handling error flags correctly at 1308 * the moment. 1309 */ 1310 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { 1311 atmel_uart_writel(port, ATMEL_US_IDR, 1312 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)); 1313 atmel_tasklet_schedule(atmel_port, 1314 &atmel_port->tasklet_rx); 1315 } 1316 1317 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | 1318 ATMEL_US_FRAME | ATMEL_US_PARE)) 1319 atmel_pdc_rxerr(port, pending); 1320 } 1321 1322 if (atmel_use_dma_rx(port)) { 1323 if (pending & ATMEL_US_TIMEOUT) { 1324 atmel_uart_writel(port, ATMEL_US_IDR, 1325 ATMEL_US_TIMEOUT); 1326 atmel_tasklet_schedule(atmel_port, 1327 &atmel_port->tasklet_rx); 1328 } 1329 } 1330 1331 /* Interrupt receive */ 1332 if (pending & ATMEL_US_RXRDY) 1333 atmel_rx_chars(port); 1334 else if (pending & ATMEL_US_RXBRK) { 1335 /* 1336 * End of break detected. If it came along with a 1337 * character, atmel_rx_chars will handle it. 1338 */ 1339 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 1340 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK); 1341 atmel_port->break_active = 0; 1342 } 1343 } 1344 1345 /* 1346 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe) 1347 */ 1348 static void 1349 atmel_handle_transmit(struct uart_port *port, unsigned int pending) 1350 { 1351 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1352 1353 if (pending & atmel_port->tx_done_mask) { 1354 atmel_uart_writel(port, ATMEL_US_IDR, 1355 atmel_port->tx_done_mask); 1356 1357 /* Start RX if flag was set and FIFO is empty */ 1358 if (atmel_port->hd_start_rx) { 1359 if (!(atmel_uart_readl(port, ATMEL_US_CSR) 1360 & ATMEL_US_TXEMPTY)) 1361 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); 1362 1363 atmel_port->hd_start_rx = false; 1364 atmel_start_rx(port); 1365 } 1366 1367 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 1368 } 1369 } 1370 1371 /* 1372 * status flags interrupt handler. 1373 */ 1374 static void 1375 atmel_handle_status(struct uart_port *port, unsigned int pending, 1376 unsigned int status) 1377 { 1378 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1379 unsigned int status_change; 1380 1381 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC 1382 | ATMEL_US_CTSIC)) { 1383 status_change = status ^ atmel_port->irq_status_prev; 1384 atmel_port->irq_status_prev = status; 1385 1386 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR 1387 | ATMEL_US_DCD | ATMEL_US_CTS)) { 1388 /* TODO: All reads to CSR will clear these interrupts! 
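 * The input-change flags are cleared on any CSR read, so a read done
 * elsewhere in the driver may consume a change before this handler sees
 * its pending bit.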
*/ 1389 if (status_change & ATMEL_US_RI) 1390 port->icount.rng++; 1391 if (status_change & ATMEL_US_DSR) 1392 port->icount.dsr++; 1393 if (status_change & ATMEL_US_DCD) 1394 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); 1395 if (status_change & ATMEL_US_CTS) 1396 uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); 1397 1398 wake_up_interruptible(&port->state->port.delta_msr_wait); 1399 } 1400 } 1401 1402 if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION)) 1403 dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending); 1404 } 1405 1406 /* 1407 * Interrupt handler 1408 */ 1409 static irqreturn_t atmel_interrupt(int irq, void *dev_id) 1410 { 1411 struct uart_port *port = dev_id; 1412 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1413 unsigned int status, pending, mask, pass_counter = 0; 1414 1415 spin_lock(&atmel_port->lock_suspended); 1416 1417 do { 1418 status = atmel_uart_readl(port, ATMEL_US_CSR); 1419 mask = atmel_uart_readl(port, ATMEL_US_IMR); 1420 pending = status & mask; 1421 if (!pending) 1422 break; 1423 1424 if (atmel_port->suspended) { 1425 atmel_port->pending |= pending; 1426 atmel_port->pending_status = status; 1427 atmel_uart_writel(port, ATMEL_US_IDR, mask); 1428 pm_system_wakeup(); 1429 break; 1430 } 1431 1432 atmel_handle_receive(port, pending); 1433 atmel_handle_status(port, pending, status); 1434 atmel_handle_transmit(port, pending); 1435 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 1436 1437 spin_unlock(&atmel_port->lock_suspended); 1438 1439 return pass_counter ? IRQ_HANDLED : IRQ_NONE; 1440 } 1441 1442 static void atmel_release_tx_pdc(struct uart_port *port) 1443 { 1444 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1445 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1446 1447 dma_unmap_single(port->dev, 1448 pdc->dma_addr, 1449 pdc->dma_size, 1450 DMA_TO_DEVICE); 1451 } 1452 1453 /* 1454 * Called from tasklet with ENDTX and TXBUFE interrupts disabled. 1455 */ 1456 static void atmel_tx_pdc(struct uart_port *port) 1457 { 1458 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1459 struct circ_buf *xmit = &port->state->xmit; 1460 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1461 int count; 1462 1463 /* nothing left to transmit? 
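 * (a non-zero PDC transmit counter means the previous transfer is still
 * in progress, so bail out and wait for the next ENDTX/TXBUFE interrupt)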
*/ 1464 if (atmel_uart_readl(port, ATMEL_PDC_TCR)) 1465 return; 1466 1467 xmit->tail += pdc->ofs; 1468 xmit->tail &= UART_XMIT_SIZE - 1; 1469 1470 port->icount.tx += pdc->ofs; 1471 pdc->ofs = 0; 1472 1473 /* more to transmit - setup next transfer */ 1474 1475 /* disable PDC transmit */ 1476 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 1477 1478 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 1479 dma_sync_single_for_device(port->dev, 1480 pdc->dma_addr, 1481 pdc->dma_size, 1482 DMA_TO_DEVICE); 1483 1484 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 1485 pdc->ofs = count; 1486 1487 atmel_uart_writel(port, ATMEL_PDC_TPR, 1488 pdc->dma_addr + xmit->tail); 1489 atmel_uart_writel(port, ATMEL_PDC_TCR, count); 1490 /* re-enable PDC transmit */ 1491 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 1492 /* Enable interrupts */ 1493 atmel_uart_writel(port, ATMEL_US_IER, 1494 atmel_port->tx_done_mask); 1495 } else { 1496 if (atmel_uart_is_half_duplex(port)) { 1497 /* DMA done, stop TX, start RX for RS485 */ 1498 atmel_start_rx(port); 1499 } 1500 } 1501 1502 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1503 uart_write_wakeup(port); 1504 } 1505 1506 static int atmel_prepare_tx_pdc(struct uart_port *port) 1507 { 1508 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1509 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1510 struct circ_buf *xmit = &port->state->xmit; 1511 1512 pdc->buf = xmit->buf; 1513 pdc->dma_addr = dma_map_single(port->dev, 1514 pdc->buf, 1515 UART_XMIT_SIZE, 1516 DMA_TO_DEVICE); 1517 pdc->dma_size = UART_XMIT_SIZE; 1518 pdc->ofs = 0; 1519 1520 return 0; 1521 } 1522 1523 static void atmel_rx_from_ring(struct uart_port *port) 1524 { 1525 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1526 struct circ_buf *ring = &atmel_port->rx_ring; 1527 unsigned int flg; 1528 unsigned int status; 1529 1530 while (ring->head != ring->tail) { 1531 struct atmel_uart_char c; 1532 1533 /* Make sure c is loaded after head. 
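 * (this smp_rmb() pairs with the smp_wmb() in atmel_buffer_rx_char())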
*/ 1534 smp_rmb(); 1535 1536 c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; 1537 1538 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 1539 1540 port->icount.rx++; 1541 status = c.status; 1542 flg = TTY_NORMAL; 1543 1544 /* 1545 * note that the error handling code is 1546 * out of the main execution path 1547 */ 1548 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 1549 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { 1550 if (status & ATMEL_US_RXBRK) { 1551 /* ignore side-effect */ 1552 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 1553 1554 port->icount.brk++; 1555 if (uart_handle_break(port)) 1556 continue; 1557 } 1558 if (status & ATMEL_US_PARE) 1559 port->icount.parity++; 1560 if (status & ATMEL_US_FRAME) 1561 port->icount.frame++; 1562 if (status & ATMEL_US_OVRE) 1563 port->icount.overrun++; 1564 1565 status &= port->read_status_mask; 1566 1567 if (status & ATMEL_US_RXBRK) 1568 flg = TTY_BREAK; 1569 else if (status & ATMEL_US_PARE) 1570 flg = TTY_PARITY; 1571 else if (status & ATMEL_US_FRAME) 1572 flg = TTY_FRAME; 1573 } 1574 1575 1576 if (uart_handle_sysrq_char(port, c.ch)) 1577 continue; 1578 1579 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); 1580 } 1581 1582 tty_flip_buffer_push(&port->state->port); 1583 } 1584 1585 static void atmel_release_rx_pdc(struct uart_port *port) 1586 { 1587 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1588 int i; 1589 1590 for (i = 0; i < 2; i++) { 1591 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1592 1593 dma_unmap_single(port->dev, 1594 pdc->dma_addr, 1595 pdc->dma_size, 1596 DMA_FROM_DEVICE); 1597 kfree(pdc->buf); 1598 } 1599 } 1600 1601 static void atmel_rx_from_pdc(struct uart_port *port) 1602 { 1603 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1604 struct tty_port *tport = &port->state->port; 1605 struct atmel_dma_buffer *pdc; 1606 int rx_idx = atmel_port->pdc_rx_idx; 1607 unsigned int head; 1608 unsigned int tail; 1609 unsigned int count; 1610 1611 do { 1612 /* Reset the UART timeout early so that we don't miss one */ 1613 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1614 1615 pdc = &atmel_port->pdc_rx[rx_idx]; 1616 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr; 1617 tail = pdc->ofs; 1618 1619 /* If the PDC has switched buffers, RPR won't contain 1620 * any address within the current buffer. Since head 1621 * is unsigned, we just need a one-way comparison to 1622 * find out. 1623 * 1624 * In this case, we just need to consume the entire 1625 * buffer and resubmit it for DMA. This will clear the 1626 * ENDRX bit as well, so that we can safely re-enable 1627 * all interrupts below. 1628 */ 1629 head = min(head, pdc->dma_size); 1630 1631 if (likely(head != tail)) { 1632 dma_sync_single_for_cpu(port->dev, pdc->dma_addr, 1633 pdc->dma_size, DMA_FROM_DEVICE); 1634 1635 /* 1636 * head will only wrap around when we recycle 1637 * the DMA buffer, and when that happens, we 1638 * explicitly set tail to 0. So head will 1639 * always be greater than tail. 1640 */ 1641 count = head - tail; 1642 1643 tty_insert_flip_string(tport, pdc->buf + pdc->ofs, 1644 count); 1645 1646 dma_sync_single_for_device(port->dev, pdc->dma_addr, 1647 pdc->dma_size, DMA_FROM_DEVICE); 1648 1649 port->icount.rx += count; 1650 pdc->ofs = head; 1651 } 1652 1653 /* 1654 * If the current buffer is full, we need to check if 1655 * the next one contains any additional data. 
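 * The drained buffer is handed back to the PDC through RNPR/RNCR and
 * pdc_rx_idx flips to the other buffer, so the two buffers keep
 * ping-ponging for as long as data arrives.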
1656 */ 1657 if (head >= pdc->dma_size) { 1658 pdc->ofs = 0; 1659 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr); 1660 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size); 1661 1662 rx_idx = !rx_idx; 1663 atmel_port->pdc_rx_idx = rx_idx; 1664 } 1665 } while (head >= pdc->dma_size); 1666 1667 tty_flip_buffer_push(tport); 1668 1669 atmel_uart_writel(port, ATMEL_US_IER, 1670 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1671 } 1672 1673 static int atmel_prepare_rx_pdc(struct uart_port *port) 1674 { 1675 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1676 int i; 1677 1678 for (i = 0; i < 2; i++) { 1679 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1680 1681 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); 1682 if (pdc->buf == NULL) { 1683 if (i != 0) { 1684 dma_unmap_single(port->dev, 1685 atmel_port->pdc_rx[0].dma_addr, 1686 PDC_BUFFER_SIZE, 1687 DMA_FROM_DEVICE); 1688 kfree(atmel_port->pdc_rx[0].buf); 1689 } 1690 atmel_port->use_pdc_rx = false; 1691 return -ENOMEM; 1692 } 1693 pdc->dma_addr = dma_map_single(port->dev, 1694 pdc->buf, 1695 PDC_BUFFER_SIZE, 1696 DMA_FROM_DEVICE); 1697 pdc->dma_size = PDC_BUFFER_SIZE; 1698 pdc->ofs = 0; 1699 } 1700 1701 atmel_port->pdc_rx_idx = 0; 1702 1703 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr); 1704 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE); 1705 1706 atmel_uart_writel(port, ATMEL_PDC_RNPR, 1707 atmel_port->pdc_rx[1].dma_addr); 1708 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE); 1709 1710 return 0; 1711 } 1712 1713 /* 1714 * tasklet handling tty stuff outside the interrupt handler. 1715 */ 1716 static void atmel_tasklet_rx_func(struct tasklet_struct *t) 1717 { 1718 struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t, 1719 tasklet_rx); 1720 struct uart_port *port = &atmel_port->uart; 1721 1722 /* The interrupt handler does not take the lock */ 1723 spin_lock(&port->lock); 1724 atmel_port->schedule_rx(port); 1725 spin_unlock(&port->lock); 1726 } 1727 1728 static void atmel_tasklet_tx_func(struct tasklet_struct *t) 1729 { 1730 struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t, 1731 tasklet_tx); 1732 struct uart_port *port = &atmel_port->uart; 1733 1734 /* The interrupt handler does not take the lock */ 1735 spin_lock(&port->lock); 1736 atmel_port->schedule_tx(port); 1737 spin_unlock(&port->lock); 1738 } 1739 1740 static void atmel_init_property(struct atmel_uart_port *atmel_port, 1741 struct platform_device *pdev) 1742 { 1743 struct device_node *np = pdev->dev.of_node; 1744 1745 /* DMA/PDC usage specification */ 1746 if (of_property_read_bool(np, "atmel,use-dma-rx")) { 1747 if (of_property_read_bool(np, "dmas")) { 1748 atmel_port->use_dma_rx = true; 1749 atmel_port->use_pdc_rx = false; 1750 } else { 1751 atmel_port->use_dma_rx = false; 1752 atmel_port->use_pdc_rx = true; 1753 } 1754 } else { 1755 atmel_port->use_dma_rx = false; 1756 atmel_port->use_pdc_rx = false; 1757 } 1758 1759 if (of_property_read_bool(np, "atmel,use-dma-tx")) { 1760 if (of_property_read_bool(np, "dmas")) { 1761 atmel_port->use_dma_tx = true; 1762 atmel_port->use_pdc_tx = false; 1763 } else { 1764 atmel_port->use_dma_tx = false; 1765 atmel_port->use_pdc_tx = true; 1766 } 1767 } else { 1768 atmel_port->use_dma_tx = false; 1769 atmel_port->use_pdc_tx = false; 1770 } 1771 } 1772 1773 static void atmel_set_ops(struct uart_port *port) 1774 { 1775 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1776 1777 if (atmel_use_dma_rx(port)) { 1778 atmel_port->prepare_rx = 
&atmel_prepare_rx_dma; 1779 atmel_port->schedule_rx = &atmel_rx_from_dma; 1780 atmel_port->release_rx = &atmel_release_rx_dma; 1781 } else if (atmel_use_pdc_rx(port)) { 1782 atmel_port->prepare_rx = &atmel_prepare_rx_pdc; 1783 atmel_port->schedule_rx = &atmel_rx_from_pdc; 1784 atmel_port->release_rx = &atmel_release_rx_pdc; 1785 } else { 1786 atmel_port->prepare_rx = NULL; 1787 atmel_port->schedule_rx = &atmel_rx_from_ring; 1788 atmel_port->release_rx = NULL; 1789 } 1790 1791 if (atmel_use_dma_tx(port)) { 1792 atmel_port->prepare_tx = &atmel_prepare_tx_dma; 1793 atmel_port->schedule_tx = &atmel_tx_dma; 1794 atmel_port->release_tx = &atmel_release_tx_dma; 1795 } else if (atmel_use_pdc_tx(port)) { 1796 atmel_port->prepare_tx = &atmel_prepare_tx_pdc; 1797 atmel_port->schedule_tx = &atmel_tx_pdc; 1798 atmel_port->release_tx = &atmel_release_tx_pdc; 1799 } else { 1800 atmel_port->prepare_tx = NULL; 1801 atmel_port->schedule_tx = &atmel_tx_chars; 1802 atmel_port->release_tx = NULL; 1803 } 1804 } 1805 1806 /* 1807 * Get ip name usart or uart 1808 */ 1809 static void atmel_get_ip_name(struct uart_port *port) 1810 { 1811 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1812 int name = atmel_uart_readl(port, ATMEL_US_NAME); 1813 u32 version; 1814 u32 usart, dbgu_uart, new_uart; 1815 /* ASCII decoding for IP version */ 1816 usart = 0x55534152; /* USAR(T) */ 1817 dbgu_uart = 0x44424755; /* DBGU */ 1818 new_uart = 0x55415254; /* UART */ 1819 1820 /* 1821 * Only USART devices from at91sam9260 SOC implement fractional 1822 * baudrate. It is available for all asynchronous modes, with the 1823 * following restriction: the sampling clock's duty cycle is not 1824 * constant. 1825 */ 1826 atmel_port->has_frac_baudrate = false; 1827 atmel_port->has_hw_timer = false; 1828 1829 if (name == new_uart) { 1830 dev_dbg(port->dev, "Uart with hw timer"); 1831 atmel_port->has_hw_timer = true; 1832 atmel_port->rtor = ATMEL_UA_RTOR; 1833 } else if (name == usart) { 1834 dev_dbg(port->dev, "Usart\n"); 1835 atmel_port->has_frac_baudrate = true; 1836 atmel_port->has_hw_timer = true; 1837 atmel_port->rtor = ATMEL_US_RTOR; 1838 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1839 switch (version) { 1840 case 0x814: /* sama5d2 */ 1841 fallthrough; 1842 case 0x701: /* sama5d4 */ 1843 atmel_port->fidi_min = 3; 1844 atmel_port->fidi_max = 65535; 1845 break; 1846 case 0x502: /* sam9x5, sama5d3 */ 1847 atmel_port->fidi_min = 3; 1848 atmel_port->fidi_max = 2047; 1849 break; 1850 default: 1851 atmel_port->fidi_min = 1; 1852 atmel_port->fidi_max = 2047; 1853 } 1854 } else if (name == dbgu_uart) { 1855 dev_dbg(port->dev, "Dbgu or uart without hw timer\n"); 1856 } else { 1857 /* fallback for older SoCs: use version field */ 1858 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1859 switch (version) { 1860 case 0x302: 1861 case 0x10213: 1862 case 0x10302: 1863 dev_dbg(port->dev, "This version is usart\n"); 1864 atmel_port->has_frac_baudrate = true; 1865 atmel_port->has_hw_timer = true; 1866 atmel_port->rtor = ATMEL_US_RTOR; 1867 break; 1868 case 0x203: 1869 case 0x10202: 1870 dev_dbg(port->dev, "This version is uart\n"); 1871 break; 1872 default: 1873 dev_err(port->dev, "Not supported ip name nor version, set to uart\n"); 1874 } 1875 } 1876 } 1877 1878 /* 1879 * Perform initialization and enable port for reception 1880 */ 1881 static int atmel_startup(struct uart_port *port) 1882 { 1883 struct platform_device *pdev = to_platform_device(port->dev); 1884 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 
1885 int retval; 1886 1887 /* 1888 * Ensure that no interrupts are enabled otherwise when 1889 * request_irq() is called we could get stuck trying to 1890 * handle an unexpected interrupt 1891 */ 1892 atmel_uart_writel(port, ATMEL_US_IDR, -1); 1893 atmel_port->ms_irq_enabled = false; 1894 1895 /* 1896 * Allocate the IRQ 1897 */ 1898 retval = request_irq(port->irq, atmel_interrupt, 1899 IRQF_SHARED | IRQF_COND_SUSPEND, 1900 dev_name(&pdev->dev), port); 1901 if (retval) { 1902 dev_err(port->dev, "atmel_startup - Can't get irq\n"); 1903 return retval; 1904 } 1905 1906 atomic_set(&atmel_port->tasklet_shutdown, 0); 1907 tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func); 1908 tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func); 1909 1910 /* 1911 * Initialize DMA (if necessary) 1912 */ 1913 atmel_init_property(atmel_port, pdev); 1914 atmel_set_ops(port); 1915 1916 if (atmel_port->prepare_rx) { 1917 retval = atmel_port->prepare_rx(port); 1918 if (retval < 0) 1919 atmel_set_ops(port); 1920 } 1921 1922 if (atmel_port->prepare_tx) { 1923 retval = atmel_port->prepare_tx(port); 1924 if (retval < 0) 1925 atmel_set_ops(port); 1926 } 1927 1928 /* 1929 * Enable FIFO when available 1930 */ 1931 if (atmel_port->fifo_size) { 1932 unsigned int txrdym = ATMEL_US_ONE_DATA; 1933 unsigned int rxrdym = ATMEL_US_ONE_DATA; 1934 unsigned int fmr; 1935 1936 atmel_uart_writel(port, ATMEL_US_CR, 1937 ATMEL_US_FIFOEN | 1938 ATMEL_US_RXFCLR | 1939 ATMEL_US_TXFLCLR); 1940 1941 if (atmel_use_dma_tx(port)) 1942 txrdym = ATMEL_US_FOUR_DATA; 1943 1944 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym); 1945 if (atmel_port->rts_high && 1946 atmel_port->rts_low) 1947 fmr |= ATMEL_US_FRTSC | 1948 ATMEL_US_RXFTHRES(atmel_port->rts_high) | 1949 ATMEL_US_RXFTHRES2(atmel_port->rts_low); 1950 1951 atmel_uart_writel(port, ATMEL_US_FMR, fmr); 1952 } 1953 1954 /* Save current CSR for comparison in atmel_tasklet_func() */ 1955 atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR); 1956 1957 /* 1958 * Finally, enable the serial port 1959 */ 1960 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 1961 /* enable xmit & rcvr */ 1962 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 1963 atmel_port->tx_stopped = false; 1964 1965 timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0); 1966 1967 if (atmel_use_pdc_rx(port)) { 1968 /* set UART timeout */ 1969 if (!atmel_port->has_hw_timer) { 1970 mod_timer(&atmel_port->uart_timer, 1971 jiffies + uart_poll_timeout(port)); 1972 /* set USART timeout */ 1973 } else { 1974 atmel_uart_writel(port, atmel_port->rtor, 1975 PDC_RX_TIMEOUT); 1976 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1977 1978 atmel_uart_writel(port, ATMEL_US_IER, 1979 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1980 } 1981 /* enable PDC controller */ 1982 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 1983 } else if (atmel_use_dma_rx(port)) { 1984 /* set UART timeout */ 1985 if (!atmel_port->has_hw_timer) { 1986 mod_timer(&atmel_port->uart_timer, 1987 jiffies + uart_poll_timeout(port)); 1988 /* set USART timeout */ 1989 } else { 1990 atmel_uart_writel(port, atmel_port->rtor, 1991 PDC_RX_TIMEOUT); 1992 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1993 1994 atmel_uart_writel(port, ATMEL_US_IER, 1995 ATMEL_US_TIMEOUT); 1996 } 1997 } else { 1998 /* enable receive only */ 1999 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 2000 } 2001 2002 return 0; 2003 } 2004 2005 /* 2006 * Flush any TX data submitted for DMA. 
Called when the TX circular 2007 * buffer is reset. 2008 */ 2009 static void atmel_flush_buffer(struct uart_port *port) 2010 { 2011 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2012 2013 if (atmel_use_pdc_tx(port)) { 2014 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 2015 atmel_port->pdc_tx.ofs = 0; 2016 } 2017 /* 2018 * in uart_flush_buffer(), the xmit circular buffer has just 2019 * been cleared, so we have to reset tx_len accordingly. 2020 */ 2021 atmel_port->tx_len = 0; 2022 } 2023 2024 /* 2025 * Disable the port 2026 */ 2027 static void atmel_shutdown(struct uart_port *port) 2028 { 2029 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2030 2031 /* Disable modem control lines interrupts */ 2032 atmel_disable_ms(port); 2033 2034 /* Disable interrupts at device level */ 2035 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2036 2037 /* Prevent spurious interrupts from scheduling the tasklet */ 2038 atomic_inc(&atmel_port->tasklet_shutdown); 2039 2040 /* 2041 * Prevent any tasklets being scheduled during 2042 * cleanup 2043 */ 2044 del_timer_sync(&atmel_port->uart_timer); 2045 2046 /* Make sure that no interrupt is on the fly */ 2047 synchronize_irq(port->irq); 2048 2049 /* 2050 * Clear out any scheduled tasklets before 2051 * we destroy the buffers 2052 */ 2053 tasklet_kill(&atmel_port->tasklet_rx); 2054 tasklet_kill(&atmel_port->tasklet_tx); 2055 2056 /* 2057 * Ensure everything is stopped and 2058 * disable port and break condition. 2059 */ 2060 atmel_stop_rx(port); 2061 atmel_stop_tx(port); 2062 2063 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 2064 2065 /* 2066 * Shut-down the DMA. 2067 */ 2068 if (atmel_port->release_rx) 2069 atmel_port->release_rx(port); 2070 if (atmel_port->release_tx) 2071 atmel_port->release_tx(port); 2072 2073 /* 2074 * Reset ring buffer pointers 2075 */ 2076 atmel_port->rx_ring.head = 0; 2077 atmel_port->rx_ring.tail = 0; 2078 2079 /* 2080 * Free the interrupts 2081 */ 2082 free_irq(port->irq, port); 2083 2084 atmel_flush_buffer(port); 2085 } 2086 2087 /* 2088 * Power / Clock management. 2089 */ 2090 static void atmel_serial_pm(struct uart_port *port, unsigned int state, 2091 unsigned int oldstate) 2092 { 2093 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2094 2095 switch (state) { 2096 case UART_PM_STATE_ON: 2097 /* 2098 * Enable the peripheral clock for this serial port. 2099 * This is called on uart_open() or a resume event. 2100 */ 2101 clk_prepare_enable(atmel_port->clk); 2102 2103 /* re-enable interrupts if we disabled some on suspend */ 2104 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); 2105 break; 2106 case UART_PM_STATE_OFF: 2107 /* Back up the interrupt mask and disable all interrupts */ 2108 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); 2109 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2110 2111 /* 2112 * Disable the peripheral clock for this serial port. 2113 * This is called on uart_close() or a suspend event. 
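		 * The IMR backup and the IDR write above are done while this
		 * clock is still running: once the peripheral clock is
		 * gated, the USART registers can no longer be accessed.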
2114 */ 2115 clk_disable_unprepare(atmel_port->clk); 2116 break; 2117 default: 2118 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); 2119 } 2120 } 2121 2122 /* 2123 * Change the port parameters 2124 */ 2125 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, 2126 struct ktermios *old) 2127 { 2128 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2129 unsigned long flags; 2130 unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0; 2131 2132 /* save the current mode register */ 2133 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR); 2134 2135 /* reset the mode, clock divisor, parity, stop bits and data size */ 2136 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | 2137 ATMEL_US_PAR | ATMEL_US_USMODE); 2138 2139 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); 2140 2141 /* byte size */ 2142 switch (termios->c_cflag & CSIZE) { 2143 case CS5: 2144 mode |= ATMEL_US_CHRL_5; 2145 break; 2146 case CS6: 2147 mode |= ATMEL_US_CHRL_6; 2148 break; 2149 case CS7: 2150 mode |= ATMEL_US_CHRL_7; 2151 break; 2152 default: 2153 mode |= ATMEL_US_CHRL_8; 2154 break; 2155 } 2156 2157 /* stop bits */ 2158 if (termios->c_cflag & CSTOPB) 2159 mode |= ATMEL_US_NBSTOP_2; 2160 2161 /* parity */ 2162 if (termios->c_cflag & PARENB) { 2163 /* Mark or Space parity */ 2164 if (termios->c_cflag & CMSPAR) { 2165 if (termios->c_cflag & PARODD) 2166 mode |= ATMEL_US_PAR_MARK; 2167 else 2168 mode |= ATMEL_US_PAR_SPACE; 2169 } else if (termios->c_cflag & PARODD) 2170 mode |= ATMEL_US_PAR_ODD; 2171 else 2172 mode |= ATMEL_US_PAR_EVEN; 2173 } else 2174 mode |= ATMEL_US_PAR_NONE; 2175 2176 spin_lock_irqsave(&port->lock, flags); 2177 2178 port->read_status_mask = ATMEL_US_OVRE; 2179 if (termios->c_iflag & INPCK) 2180 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2181 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 2182 port->read_status_mask |= ATMEL_US_RXBRK; 2183 2184 if (atmel_use_pdc_rx(port)) 2185 /* need to enable error interrupts */ 2186 atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask); 2187 2188 /* 2189 * Characters to ignore 2190 */ 2191 port->ignore_status_mask = 0; 2192 if (termios->c_iflag & IGNPAR) 2193 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2194 if (termios->c_iflag & IGNBRK) { 2195 port->ignore_status_mask |= ATMEL_US_RXBRK; 2196 /* 2197 * If we're ignoring parity and break indicators, 2198 * ignore overruns too (for real raw support). 2199 */ 2200 if (termios->c_iflag & IGNPAR) 2201 port->ignore_status_mask |= ATMEL_US_OVRE; 2202 } 2203 /* TODO: Ignore all characters if CREAD is set.*/ 2204 2205 /* update the per-port timeout */ 2206 uart_update_timeout(port, termios->c_cflag, baud); 2207 2208 /* 2209 * save/disable interrupts. The tty layer will ensure that the 2210 * transmitter is empty if requested by the caller, so there's 2211 * no need to wait for it here. 
2212 */ 2213 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2214 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2215 2216 /* disable receiver and transmitter */ 2217 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS); 2218 atmel_port->tx_stopped = true; 2219 2220 /* mode */ 2221 if (port->rs485.flags & SER_RS485_ENABLED) { 2222 atmel_uart_writel(port, ATMEL_US_TTGR, 2223 port->rs485.delay_rts_after_send); 2224 mode |= ATMEL_US_USMODE_RS485; 2225 } else if (port->iso7816.flags & SER_ISO7816_ENABLED) { 2226 atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg); 2227 /* select mck clock, and output */ 2228 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 2229 /* set max iterations */ 2230 mode |= ATMEL_US_MAX_ITER(3); 2231 if ((port->iso7816.flags & SER_ISO7816_T_PARAM) 2232 == SER_ISO7816_T(0)) 2233 mode |= ATMEL_US_USMODE_ISO7816_T0; 2234 else 2235 mode |= ATMEL_US_USMODE_ISO7816_T1; 2236 } else if (termios->c_cflag & CRTSCTS) { 2237 /* RS232 with hardware handshake (RTS/CTS) */ 2238 if (atmel_use_fifo(port) && 2239 !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { 2240 /* 2241 * with ATMEL_US_USMODE_HWHS set, the controller will 2242 * be able to drive the RTS pin high/low when the RX 2243 * FIFO is above RXFTHRES/below RXFTHRES2. 2244 * It will also disable the transmitter when the CTS 2245 * pin is high. 2246 * This mode is not activated if CTS pin is a GPIO 2247 * because in this case, the transmitter is always 2248 * disabled (there must be an internal pull-up 2249 * responsible for this behaviour). 2250 * If the RTS pin is a GPIO, the controller won't be 2251 * able to drive it according to the FIFO thresholds, 2252 * but it will be handled by the driver. 2253 */ 2254 mode |= ATMEL_US_USMODE_HWHS; 2255 } else { 2256 /* 2257 * For platforms without FIFO, the flow control is 2258 * handled by the driver. 2259 */ 2260 mode |= ATMEL_US_USMODE_NORMAL; 2261 } 2262 } else { 2263 /* RS232 without hadware handshake */ 2264 mode |= ATMEL_US_USMODE_NORMAL; 2265 } 2266 2267 /* 2268 * Set the baud rate: 2269 * Fractional baudrate allows to setup output frequency more 2270 * accurately. This feature is enabled only when using normal mode. 
2271 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8)) 2272 * Currently, OVER is always set to 0 so we get 2273 * baudrate = selected clock / (16 * (CD + FP / 8)) 2274 * then 2275 * 8 CD + FP = selected clock / (2 * baudrate) 2276 */ 2277 if (atmel_port->has_frac_baudrate) { 2278 div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2); 2279 cd = div >> 3; 2280 fp = div & ATMEL_US_FP_MASK; 2281 } else { 2282 cd = uart_get_divisor(port, baud); 2283 } 2284 2285 if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */ 2286 cd /= 8; 2287 mode |= ATMEL_US_USCLKS_MCK_DIV8; 2288 } 2289 quot = cd | fp << ATMEL_US_FP_OFFSET; 2290 2291 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) 2292 atmel_uart_writel(port, ATMEL_US_BRGR, quot); 2293 2294 /* set the mode, clock divisor, parity, stop bits and data size */ 2295 atmel_uart_writel(port, ATMEL_US_MR, mode); 2296 2297 /* 2298 * when switching the mode, set the RTS line state according to the 2299 * new mode, otherwise keep the former state 2300 */ 2301 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { 2302 unsigned int rts_state; 2303 2304 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 2305 /* let the hardware control the RTS line */ 2306 rts_state = ATMEL_US_RTSDIS; 2307 } else { 2308 /* force RTS line to low level */ 2309 rts_state = ATMEL_US_RTSEN; 2310 } 2311 2312 atmel_uart_writel(port, ATMEL_US_CR, rts_state); 2313 } 2314 2315 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2316 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2317 atmel_port->tx_stopped = false; 2318 2319 /* restore interrupts */ 2320 atmel_uart_writel(port, ATMEL_US_IER, imr); 2321 2322 /* CTS flow-control and modem-status interrupts */ 2323 if (UART_ENABLE_MS(port, termios->c_cflag)) 2324 atmel_enable_ms(port); 2325 else 2326 atmel_disable_ms(port); 2327 2328 spin_unlock_irqrestore(&port->lock, flags); 2329 } 2330 2331 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios) 2332 { 2333 if (termios->c_line == N_PPS) { 2334 port->flags |= UPF_HARDPPS_CD; 2335 spin_lock_irq(&port->lock); 2336 atmel_enable_ms(port); 2337 spin_unlock_irq(&port->lock); 2338 } else { 2339 port->flags &= ~UPF_HARDPPS_CD; 2340 if (!UART_ENABLE_MS(port, termios->c_cflag)) { 2341 spin_lock_irq(&port->lock); 2342 atmel_disable_ms(port); 2343 spin_unlock_irq(&port->lock); 2344 } 2345 } 2346 } 2347 2348 /* 2349 * Return string describing the specified port 2350 */ 2351 static const char *atmel_type(struct uart_port *port) 2352 { 2353 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL; 2354 } 2355 2356 /* 2357 * Release the memory region(s) being used by 'port'. 2358 */ 2359 static void atmel_release_port(struct uart_port *port) 2360 { 2361 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2362 int size = resource_size(mpdev->resource); 2363 2364 release_mem_region(port->mapbase, size); 2365 2366 if (port->flags & UPF_IOREMAP) { 2367 iounmap(port->membase); 2368 port->membase = NULL; 2369 } 2370 } 2371 2372 /* 2373 * Request the memory region(s) being used by 'port'. 
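 * The region size is taken from the first resource of the parent platform
 * device: this driver is probed through the at91-usart MFD wrapper, so the
 * memory resource (like the IRQ and the of_node) belongs to the parent.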
2374 */ 2375 static int atmel_request_port(struct uart_port *port) 2376 { 2377 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2378 int size = resource_size(mpdev->resource); 2379 2380 if (!request_mem_region(port->mapbase, size, "atmel_serial")) 2381 return -EBUSY; 2382 2383 if (port->flags & UPF_IOREMAP) { 2384 port->membase = ioremap(port->mapbase, size); 2385 if (port->membase == NULL) { 2386 release_mem_region(port->mapbase, size); 2387 return -ENOMEM; 2388 } 2389 } 2390 2391 return 0; 2392 } 2393 2394 /* 2395 * Configure/autoconfigure the port. 2396 */ 2397 static void atmel_config_port(struct uart_port *port, int flags) 2398 { 2399 if (flags & UART_CONFIG_TYPE) { 2400 port->type = PORT_ATMEL; 2401 atmel_request_port(port); 2402 } 2403 } 2404 2405 /* 2406 * Verify the new serial_struct (for TIOCSSERIAL). 2407 */ 2408 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) 2409 { 2410 int ret = 0; 2411 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) 2412 ret = -EINVAL; 2413 if (port->irq != ser->irq) 2414 ret = -EINVAL; 2415 if (ser->io_type != SERIAL_IO_MEM) 2416 ret = -EINVAL; 2417 if (port->uartclk / 16 != ser->baud_base) 2418 ret = -EINVAL; 2419 if (port->mapbase != (unsigned long)ser->iomem_base) 2420 ret = -EINVAL; 2421 if (port->iobase != ser->port) 2422 ret = -EINVAL; 2423 if (ser->hub6 != 0) 2424 ret = -EINVAL; 2425 return ret; 2426 } 2427 2428 #ifdef CONFIG_CONSOLE_POLL 2429 static int atmel_poll_get_char(struct uart_port *port) 2430 { 2431 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY)) 2432 cpu_relax(); 2433 2434 return atmel_uart_read_char(port); 2435 } 2436 2437 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) 2438 { 2439 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2440 cpu_relax(); 2441 2442 atmel_uart_write_char(port, ch); 2443 } 2444 #endif 2445 2446 static const struct uart_ops atmel_pops = { 2447 .tx_empty = atmel_tx_empty, 2448 .set_mctrl = atmel_set_mctrl, 2449 .get_mctrl = atmel_get_mctrl, 2450 .stop_tx = atmel_stop_tx, 2451 .start_tx = atmel_start_tx, 2452 .stop_rx = atmel_stop_rx, 2453 .enable_ms = atmel_enable_ms, 2454 .break_ctl = atmel_break_ctl, 2455 .startup = atmel_startup, 2456 .shutdown = atmel_shutdown, 2457 .flush_buffer = atmel_flush_buffer, 2458 .set_termios = atmel_set_termios, 2459 .set_ldisc = atmel_set_ldisc, 2460 .type = atmel_type, 2461 .release_port = atmel_release_port, 2462 .request_port = atmel_request_port, 2463 .config_port = atmel_config_port, 2464 .verify_port = atmel_verify_port, 2465 .pm = atmel_serial_pm, 2466 #ifdef CONFIG_CONSOLE_POLL 2467 .poll_get_char = atmel_poll_get_char, 2468 .poll_put_char = atmel_poll_put_char, 2469 #endif 2470 }; 2471 2472 static const struct serial_rs485 atmel_rs485_supported = { 2473 .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX, 2474 .delay_rts_before_send = 1, 2475 .delay_rts_after_send = 1, 2476 }; 2477 2478 /* 2479 * Configure the port from the platform device resource info. 
2480 */ 2481 static int atmel_init_port(struct atmel_uart_port *atmel_port, 2482 struct platform_device *pdev) 2483 { 2484 int ret; 2485 struct uart_port *port = &atmel_port->uart; 2486 struct platform_device *mpdev = to_platform_device(pdev->dev.parent); 2487 2488 atmel_init_property(atmel_port, pdev); 2489 atmel_set_ops(port); 2490 2491 port->iotype = UPIO_MEM; 2492 port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; 2493 port->ops = &atmel_pops; 2494 port->fifosize = 1; 2495 port->dev = &pdev->dev; 2496 port->mapbase = mpdev->resource[0].start; 2497 port->irq = platform_get_irq(mpdev, 0); 2498 port->rs485_config = atmel_config_rs485; 2499 port->rs485_supported = atmel_rs485_supported; 2500 port->iso7816_config = atmel_config_iso7816; 2501 port->membase = NULL; 2502 2503 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); 2504 2505 ret = uart_get_rs485_mode(port); 2506 if (ret) 2507 return ret; 2508 2509 port->uartclk = clk_get_rate(atmel_port->clk); 2510 2511 /* 2512 * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or 2513 * ENDTX|TXBUFE 2514 */ 2515 if (atmel_uart_is_half_duplex(port)) 2516 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 2517 else if (atmel_use_pdc_tx(port)) { 2518 port->fifosize = PDC_BUFFER_SIZE; 2519 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; 2520 } else { 2521 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 2522 } 2523 2524 return 0; 2525 } 2526 2527 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2528 static void atmel_console_putchar(struct uart_port *port, unsigned char ch) 2529 { 2530 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2531 cpu_relax(); 2532 atmel_uart_write_char(port, ch); 2533 } 2534 2535 /* 2536 * Interrupts are disabled on entering 2537 */ 2538 static void atmel_console_write(struct console *co, const char *s, u_int count) 2539 { 2540 struct uart_port *port = &atmel_ports[co->index].uart; 2541 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2542 unsigned int status, imr; 2543 unsigned int pdc_tx; 2544 2545 /* 2546 * First, save IMR and then disable interrupts 2547 */ 2548 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2549 atmel_uart_writel(port, ATMEL_US_IDR, 2550 ATMEL_US_RXRDY | atmel_port->tx_done_mask); 2551 2552 /* Store PDC transmit status and disable it */ 2553 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2554 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2555 2556 /* Make sure that tx path is actually able to send characters */ 2557 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 2558 atmel_port->tx_stopped = false; 2559 2560 uart_console_write(port, s, count, atmel_console_putchar); 2561 2562 /* 2563 * Finally, wait for transmitter to become empty 2564 * and restore IMR 2565 */ 2566 do { 2567 status = atmel_uart_readl(port, ATMEL_US_CSR); 2568 } while (!(status & ATMEL_US_TXRDY)); 2569 2570 /* Restore PDC transmit status */ 2571 if (pdc_tx) 2572 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 2573 2574 /* set interrupts back the way they were */ 2575 atmel_uart_writel(port, ATMEL_US_IER, imr); 2576 } 2577 2578 /* 2579 * If the port was already initialised (eg, by a boot loader), 2580 * try to determine the current setup. 2581 */ 2582 static void __init atmel_console_get_options(struct uart_port *port, int *baud, 2583 int *parity, int *bits) 2584 { 2585 unsigned int mr, quot; 2586 2587 /* 2588 * If the baud rate generator isn't running, the port wasn't 2589 * initialized by the boot loader. 
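	 * In that case we return without touching *baud, *parity or *bits,
	 * so atmel_console_setup() keeps its 115200n8 defaults.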
2590 */ 2591 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD; 2592 if (!quot) 2593 return; 2594 2595 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; 2596 if (mr == ATMEL_US_CHRL_8) 2597 *bits = 8; 2598 else 2599 *bits = 7; 2600 2601 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; 2602 if (mr == ATMEL_US_PAR_EVEN) 2603 *parity = 'e'; 2604 else if (mr == ATMEL_US_PAR_ODD) 2605 *parity = 'o'; 2606 2607 /* 2608 * The serial core only rounds down when matching this to a 2609 * supported baud rate. Make sure we don't end up slightly 2610 * lower than one of those, as it would make us fall through 2611 * to a much lower baud rate than we really want. 2612 */ 2613 *baud = port->uartclk / (16 * (quot - 1)); 2614 } 2615 2616 static int __init atmel_console_setup(struct console *co, char *options) 2617 { 2618 struct uart_port *port = &atmel_ports[co->index].uart; 2619 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2620 int baud = 115200; 2621 int bits = 8; 2622 int parity = 'n'; 2623 int flow = 'n'; 2624 2625 if (port->membase == NULL) { 2626 /* Port not initialized yet - delay setup */ 2627 return -ENODEV; 2628 } 2629 2630 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2631 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2632 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2633 atmel_port->tx_stopped = false; 2634 2635 if (options) 2636 uart_parse_options(options, &baud, &parity, &bits, &flow); 2637 else 2638 atmel_console_get_options(port, &baud, &parity, &bits); 2639 2640 return uart_set_options(port, co, baud, parity, bits, flow); 2641 } 2642 2643 static struct uart_driver atmel_uart; 2644 2645 static struct console atmel_console = { 2646 .name = ATMEL_DEVICENAME, 2647 .write = atmel_console_write, 2648 .device = uart_console_device, 2649 .setup = atmel_console_setup, 2650 .flags = CON_PRINTBUFFER, 2651 .index = -1, 2652 .data = &atmel_uart, 2653 }; 2654 2655 static void atmel_serial_early_write(struct console *con, const char *s, 2656 unsigned int n) 2657 { 2658 struct earlycon_device *dev = con->data; 2659 2660 uart_console_write(&dev->port, s, n, atmel_console_putchar); 2661 } 2662 2663 static int __init atmel_early_console_setup(struct earlycon_device *device, 2664 const char *options) 2665 { 2666 if (!device->port.membase) 2667 return -ENODEV; 2668 2669 device->con->write = atmel_serial_early_write; 2670 2671 return 0; 2672 } 2673 2674 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91rm9200-usart", 2675 atmel_early_console_setup); 2676 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91sam9260-usart", 2677 atmel_early_console_setup); 2678 2679 #define ATMEL_CONSOLE_DEVICE (&atmel_console) 2680 2681 #else 2682 #define ATMEL_CONSOLE_DEVICE NULL 2683 #endif 2684 2685 static struct uart_driver atmel_uart = { 2686 .owner = THIS_MODULE, 2687 .driver_name = "atmel_serial", 2688 .dev_name = ATMEL_DEVICENAME, 2689 .major = SERIAL_ATMEL_MAJOR, 2690 .minor = MINOR_START, 2691 .nr = ATMEL_MAX_UART, 2692 .cons = ATMEL_CONSOLE_DEVICE, 2693 }; 2694 2695 static bool atmel_serial_clk_will_stop(void) 2696 { 2697 #ifdef CONFIG_ARCH_AT91 2698 return at91_suspend_entering_slow_clock(); 2699 #else 2700 return false; 2701 #endif 2702 } 2703 2704 static int __maybe_unused atmel_serial_suspend(struct device *dev) 2705 { 2706 struct uart_port *port = dev_get_drvdata(dev); 2707 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2708 2709 if (uart_console(port) && console_suspend_enabled) { 2710 /* Drain the TX shifter */ 2711 
while (!(atmel_uart_readl(port, ATMEL_US_CSR) & 2712 ATMEL_US_TXEMPTY)) 2713 cpu_relax(); 2714 } 2715 2716 if (uart_console(port) && !console_suspend_enabled) { 2717 /* Cache register values as we won't get a full shutdown/startup 2718 * cycle 2719 */ 2720 atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR); 2721 atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR); 2722 atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 2723 atmel_port->cache.rtor = atmel_uart_readl(port, 2724 atmel_port->rtor); 2725 atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR); 2726 atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR); 2727 atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR); 2728 } 2729 2730 /* we can not wake up if we're running on slow clock */ 2731 atmel_port->may_wakeup = device_may_wakeup(dev); 2732 if (atmel_serial_clk_will_stop()) { 2733 unsigned long flags; 2734 2735 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2736 atmel_port->suspended = true; 2737 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2738 device_set_wakeup_enable(dev, 0); 2739 } 2740 2741 uart_suspend_port(&atmel_uart, port); 2742 2743 return 0; 2744 } 2745 2746 static int __maybe_unused atmel_serial_resume(struct device *dev) 2747 { 2748 struct uart_port *port = dev_get_drvdata(dev); 2749 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2750 unsigned long flags; 2751 2752 if (uart_console(port) && !console_suspend_enabled) { 2753 atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr); 2754 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr); 2755 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr); 2756 atmel_uart_writel(port, atmel_port->rtor, 2757 atmel_port->cache.rtor); 2758 atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr); 2759 2760 if (atmel_port->fifo_size) { 2761 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN | 2762 ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR); 2763 atmel_uart_writel(port, ATMEL_US_FMR, 2764 atmel_port->cache.fmr); 2765 atmel_uart_writel(port, ATMEL_US_FIER, 2766 atmel_port->cache.fimr); 2767 } 2768 atmel_start_rx(port); 2769 } 2770 2771 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2772 if (atmel_port->pending) { 2773 atmel_handle_receive(port, atmel_port->pending); 2774 atmel_handle_status(port, atmel_port->pending, 2775 atmel_port->pending_status); 2776 atmel_handle_transmit(port, atmel_port->pending); 2777 atmel_port->pending = 0; 2778 } 2779 atmel_port->suspended = false; 2780 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2781 2782 uart_resume_port(&atmel_uart, port); 2783 device_set_wakeup_enable(dev, atmel_port->may_wakeup); 2784 2785 return 0; 2786 } 2787 2788 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port, 2789 struct platform_device *pdev) 2790 { 2791 atmel_port->fifo_size = 0; 2792 atmel_port->rts_low = 0; 2793 atmel_port->rts_high = 0; 2794 2795 if (of_property_read_u32(pdev->dev.of_node, 2796 "atmel,fifo-size", 2797 &atmel_port->fifo_size)) 2798 return; 2799 2800 if (!atmel_port->fifo_size) 2801 return; 2802 2803 if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) { 2804 atmel_port->fifo_size = 0; 2805 dev_err(&pdev->dev, "Invalid FIFO size\n"); 2806 return; 2807 } 2808 2809 /* 2810 * 0 <= rts_low <= rts_high <= fifo_size 2811 * Once their CTS line asserted by the remote peer, some x86 UARTs tend 2812 * to flush their internal TX FIFO, commonly up to 16 data, before 2813 * actually stopping to send new data. 
So we try to set the RTS High 2814 * Threshold to a reasonably high value respecting this 16 data 2815 * empirical rule when possible. 2816 */ 2817 atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1, 2818 atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET); 2819 atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2, 2820 atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET); 2821 2822 dev_info(&pdev->dev, "Using FIFO (%u data)\n", 2823 atmel_port->fifo_size); 2824 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n", 2825 atmel_port->rts_high); 2826 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n", 2827 atmel_port->rts_low); 2828 } 2829 2830 static int atmel_serial_probe(struct platform_device *pdev) 2831 { 2832 struct atmel_uart_port *atmel_port; 2833 struct device_node *np = pdev->dev.parent->of_node; 2834 void *data; 2835 int ret; 2836 bool rs485_enabled; 2837 2838 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); 2839 2840 /* 2841 * In device tree there is no node with "atmel,at91rm9200-usart-serial" 2842 * as compatible string. This driver is probed by at91-usart mfd driver 2843 * which is just a wrapper over the atmel_serial driver and 2844 * spi-at91-usart driver. All attributes needed by this driver are 2845 * found in of_node of parent. 2846 */ 2847 pdev->dev.of_node = np; 2848 2849 ret = of_alias_get_id(np, "serial"); 2850 if (ret < 0) 2851 /* port id not found in platform data nor device-tree aliases: 2852 * auto-enumerate it */ 2853 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); 2854 2855 if (ret >= ATMEL_MAX_UART) { 2856 ret = -ENODEV; 2857 goto err; 2858 } 2859 2860 if (test_and_set_bit(ret, atmel_ports_in_use)) { 2861 /* port already in use */ 2862 ret = -EBUSY; 2863 goto err; 2864 } 2865 2866 atmel_port = &atmel_ports[ret]; 2867 atmel_port->backup_imr = 0; 2868 atmel_port->uart.line = ret; 2869 atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE); 2870 atmel_serial_probe_fifos(atmel_port, pdev); 2871 2872 atomic_set(&atmel_port->tasklet_shutdown, 0); 2873 spin_lock_init(&atmel_port->lock_suspended); 2874 2875 atmel_port->clk = devm_clk_get(&pdev->dev, "usart"); 2876 if (IS_ERR(atmel_port->clk)) { 2877 ret = PTR_ERR(atmel_port->clk); 2878 goto err; 2879 } 2880 ret = clk_prepare_enable(atmel_port->clk); 2881 if (ret) 2882 goto err; 2883 2884 ret = atmel_init_port(atmel_port, pdev); 2885 if (ret) 2886 goto err_clk_disable_unprepare; 2887 2888 atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0); 2889 if (IS_ERR(atmel_port->gpios)) { 2890 ret = PTR_ERR(atmel_port->gpios); 2891 goto err_clk_disable_unprepare; 2892 } 2893 2894 if (!atmel_use_pdc_rx(&atmel_port->uart)) { 2895 ret = -ENOMEM; 2896 data = kmalloc_array(ATMEL_SERIAL_RINGSIZE, 2897 sizeof(struct atmel_uart_char), 2898 GFP_KERNEL); 2899 if (!data) 2900 goto err_clk_disable_unprepare; 2901 atmel_port->rx_ring.buf = data; 2902 } 2903 2904 rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED; 2905 2906 ret = uart_add_one_port(&atmel_uart, &atmel_port->uart); 2907 if (ret) 2908 goto err_add_port; 2909 2910 device_init_wakeup(&pdev->dev, 1); 2911 platform_set_drvdata(pdev, atmel_port); 2912 2913 if (rs485_enabled) { 2914 atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR, 2915 ATMEL_US_USMODE_NORMAL); 2916 atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR, 2917 ATMEL_US_RTSEN); 2918 } 2919 2920 /* 2921 * Get port name of usart or uart 2922 */ 2923 atmel_get_ip_name(&atmel_port->uart); 2924 2925 /* 2926 * The peripheral clock can now safely be disabled 
till the port 2927 * is used 2928 */ 2929 clk_disable_unprepare(atmel_port->clk); 2930 2931 return 0; 2932 2933 err_add_port: 2934 kfree(atmel_port->rx_ring.buf); 2935 atmel_port->rx_ring.buf = NULL; 2936 err_clk_disable_unprepare: 2937 clk_disable_unprepare(atmel_port->clk); 2938 clear_bit(atmel_port->uart.line, atmel_ports_in_use); 2939 err: 2940 return ret; 2941 } 2942 2943 /* 2944 * Even if the driver is not modular, it makes sense to be able to 2945 * unbind a device: there can be many bound devices, and there are 2946 * situations where dynamic binding and unbinding can be useful. 2947 * 2948 * For example, a connected device can require a specific firmware update 2949 * protocol that needs bitbanging on IO lines, but use the regular serial 2950 * port in the normal case. 2951 */ 2952 static int atmel_serial_remove(struct platform_device *pdev) 2953 { 2954 struct uart_port *port = platform_get_drvdata(pdev); 2955 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2956 int ret = 0; 2957 2958 tasklet_kill(&atmel_port->tasklet_rx); 2959 tasklet_kill(&atmel_port->tasklet_tx); 2960 2961 device_init_wakeup(&pdev->dev, 0); 2962 2963 ret = uart_remove_one_port(&atmel_uart, port); 2964 2965 kfree(atmel_port->rx_ring.buf); 2966 2967 /* "port" is allocated statically, so we shouldn't free it */ 2968 2969 clear_bit(port->line, atmel_ports_in_use); 2970 2971 pdev->dev.of_node = NULL; 2972 2973 return ret; 2974 } 2975 2976 static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend, 2977 atmel_serial_resume); 2978 2979 static struct platform_driver atmel_serial_driver = { 2980 .probe = atmel_serial_probe, 2981 .remove = atmel_serial_remove, 2982 .driver = { 2983 .name = "atmel_usart_serial", 2984 .of_match_table = of_match_ptr(atmel_serial_dt_ids), 2985 .pm = pm_ptr(&atmel_serial_pm_ops), 2986 }, 2987 }; 2988 2989 static int __init atmel_serial_init(void) 2990 { 2991 int ret; 2992 2993 ret = uart_register_driver(&atmel_uart); 2994 if (ret) 2995 return ret; 2996 2997 ret = platform_driver_register(&atmel_serial_driver); 2998 if (ret) 2999 uart_unregister_driver(&atmel_uart); 3000 3001 return ret; 3002 } 3003 device_initcall(atmel_serial_init); 3004
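/*
 * Worked example of the fractional baud rate setup in atmel_set_termios()
 * above, assuming (purely for illustration) a 132 MHz peripheral clock and
 * 115200 baud: div = DIV_ROUND_CLOSEST(132000000, 2 * 115200) = 573, so
 * CD = 573 >> 3 = 71 and FP = 573 & 7 = 5, which gives
 * 132000000 / (16 * (71 + 5/8)) ~= 115183 baud, about 0.015% low.
 * With the integer divisor alone (CD = 72) the error would be roughly 0.5%.
 */

/*
 * Sketch of a device tree node served by this driver through the at91-usart
 * MFD parent described in atmel_serial_probe(); the unit address, interrupt
 * specifier, clock phandle and FIFO size are placeholders that differ per
 * SoC, and the port number is normally fixed by a "serialN" alias:
 *
 *	uart0: serial@f801c000 {
 *		compatible = "atmel,at91sam9260-usart";
 *		reg = <0xf801c000 0x100>;
 *		interrupts = <24 IRQ_TYPE_LEVEL_HIGH 7>;
 *		clocks = <&uart0_clk>;
 *		clock-names = "usart";
 *		atmel,fifo-size = <16>;
 *	};
 */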